ngram
listlengths
0
82k
[ "auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three", "id_three = FooIdentifier() id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate", "test_middleware(self): identifier = FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator()", "with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials", "identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one = FooIdentifier()", "self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self): identifier = FooIdentifier()", "from cellardoor.authentication import * from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier):", "app = Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[ (id_one, auth_one),", "= AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ, lambda: None)", "Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator()", "= base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) self.assertEquals(identified_credentials, {'username':'foo',", "def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self): identifier", "auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, 
BarAuthenticator())]) def test_bad_authenticator(self):", "None, [(FooIdentifier(), None)]) def test_middleware(self): identifier = FooIdentifier() identifier.identify =", "cellardoor import errors from cellardoor.authentication import * from cellardoor.authentication.basic import", "AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(),", "FooIdentifier() id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one')", "= Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two,", "= BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self):", "from cellardoor import errors from cellardoor.authentication import * from cellardoor.authentication.basic", "TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def", "FooIdentifier() id_two.identify = Mock(return_value='two') id_three = FooIdentifier() id_three.identify = Mock(return_value='three')", "= {} middleware(environ, lambda: None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def", "authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier,", "test_pass(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic", "= FooIdentifier() id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate =", "= Mock(return_value='two') id_three = FooIdentifier() id_three.identify = Mock(return_value='three') auth_one =", 
"test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware,", "Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[])", "auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware,", "= AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three, auth_three)", "* from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator):", "Mock import base64 from cellardoor import errors from cellardoor.authentication import", "BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(", "app, pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three, auth_three) ] )", "= base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self):", "self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def", "= BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier", "id.identify({}) def test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def", "auth_two), (id_three, auth_three) ] ) environ = {} middleware(environ, lambda:", "def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def 
test_bad_authenticator(self): self.assertRaises(ValueError,", "= FooIdentifier() id_one.identify = Mock(return_value=None) id_two = FooIdentifier() id_two.identify =", "def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials,", "unittest from mock import Mock import base64 from cellardoor import", "lambda: None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier =", "= identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier()", "test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError,", "id_two = FooIdentifier() id_two.identify = Mock(return_value='two') id_three = FooIdentifier() id_three.identify", "def test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self):", "id_one.identify = Mock(return_value=None) id_two = FooIdentifier() id_two.identify = Mock(return_value='two') id_three", "self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials =", "[(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def", "identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def", "import errors from cellardoor.authentication import * from cellardoor.authentication.basic import BasicAuthIdentifier", "= Mock(return_value=[]) middleware = 
AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23}", "= identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials", "Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth = Authenticator() with", "from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass", "def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError):", "Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator()", "= Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth = Authenticator()", "credentials}) def test_pass(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials", "import * from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass class", "= BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate =", "identifier = FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate", "authenticator)]) environ = {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ,", "class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self):", "None) identifier.identify.assert_called_once_with(environ) 
authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one =", "(id_two, auth_two), (id_three, auth_three) ] ) environ = {} middleware(environ,", "{'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials =", "% credentials}) def test_pass(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar')", "identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app", "self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials", "BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier =", "BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three')", "BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app,", "Mock(return_value='three') app = Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[ (id_one,", "credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier()", "identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self):", "BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier", "None) def test_skip_if_not_a_pair(self): 
identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials,", "test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth", "Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None,", "import Mock import base64 from cellardoor import errors from cellardoor.authentication", "None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'})", "credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) self.assertEquals(identified_credentials,", "id = Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth =", "auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate", "class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({})", "None)]) def test_middleware(self): identifier = FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator", "auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[]) middleware", "= BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' %", "[(FooIdentifier(), None)]) def test_middleware(self): identifier = FooIdentifier() identifier.identify = Mock(return_value='foo')", "'cellardoor.identity':'bar'}) def test_middleware_skip(self): 
id_one = FooIdentifier() id_one.identify = Mock(return_value=None) id_two", "self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo", "self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one = FooIdentifier() id_one.identify =", "class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier()", "def test_middleware_skip(self): id_one = FooIdentifier() id_one.identify = Mock(return_value=None) id_two =", "TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials,", "authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[]) middleware", "with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self): identifier =", "BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier() with", "Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ,", "= BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate =", "test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, None) def", "auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app", "= Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, 
AuthenticationMiddleware, None,", "= BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier =", "from mock import Mock import base64 from cellardoor import errors", "= Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app =", "Mock(return_value='bar') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ", "credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier =", "self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None,", "credentials = identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier()", "123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError):", "= Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three =", "with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError):", "middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self):", "= Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two =", "= Mock(return_value=None) id_two = FooIdentifier() id_two.identify = Mock(return_value='two') id_three =", 
") environ = {} middleware(environ, lambda: None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class", "= Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app =", "pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo')", "auth_three) ] ) environ = {} middleware(environ, lambda: None) self.assertEquals(environ,", "identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with", "self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self): identifier = BasicAuthIdentifier()", "middleware = AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three,", "= BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[]) middleware =", "BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def", "BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier =", "identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s'", "pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id =", "(id_one, auth_one), (id_two, auth_two), (id_three, auth_three) ] ) environ =", "pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three, auth_three) ] ) environ", "] ) environ = {} middleware(environ, lambda: None) 
self.assertEquals(environ, {'cellardoor.identity':'two'})", "errors from cellardoor.authentication import * from cellardoor.authentication.basic import BasicAuthIdentifier class", "FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id", "id_two.identify = Mock(return_value='two') id_three = FooIdentifier() id_three.identify = Mock(return_value='three') auth_one", "def test_pass(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials =", "FooIdentifier() id_one.identify = Mock(return_value=None) id_two = FooIdentifier() id_two.identify = Mock(return_value='two')", "def test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self):", "lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one", "BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self):", "= Mock(return_value='bar') app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)])", "None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'})", "BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier()", "identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self): identifier = BasicAuthIdentifier() credentials", "middleware(environ, lambda: None) self.assertEquals(environ, 
{'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier", "import base64 from cellardoor import errors from cellardoor.authentication import *", "base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) self.assertEquals(identified_credentials, {'username':'foo', 'password':'<PASSWORD>'})", "test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic", "authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one = FooIdentifier() id_one.identify", "test_middleware_skip(self): id_one = FooIdentifier() id_one.identify = Mock(return_value=None) id_two = FooIdentifier()", "= BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier", "{'skidoo':23, 'cellardoor.identity':'bar'}) def test_middleware_skip(self): id_one = FooIdentifier() id_one.identify = Mock(return_value=None)", "pass class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError):", "= FooIdentifier() id_two.identify = Mock(return_value='two') id_three = FooIdentifier() id_three.identify =", "Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two, auth_two),", "None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier()", "def test_skip_if_no_auth_header(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, 
None)", "FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate = Mock(return_value='bar')", "AuthenticationMiddleware( app, pairs=[ (id_one, auth_one), (id_two, auth_two), (id_three, auth_three) ]", "import BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class TestAuthentication(unittest.TestCase):", "BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})", "middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ, lambda:", "identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar')", "def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def", "(id_three, auth_three) ] ) environ = {} middleware(environ, lambda: None)", "test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo 123'}) self.assertEquals(credentials, None)", "auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate", "test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self):", "= BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' %", "auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[]) middleware = AuthenticationMiddleware( app,", "with 
self.assertRaises(NotImplementedError): auth.authenticate({}) def test_bad_identifier(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(None, BarAuthenticator())])", "def test_middleware(self): identifier = FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator =", "base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def test_pass(self): identifier", "def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None)", "auth_one), (id_two, auth_two), (id_three, auth_three) ] ) environ = {}", "cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass class BarAuthenticator(Authenticator): pass class", "identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials =", "self.assertRaises(NotImplementedError): id.identify({}) def test_abstract_authenticator(self): auth = Authenticator() with self.assertRaises(NotImplementedError): auth.authenticate({})", "id_one = FooIdentifier() id_one.identify = Mock(return_value=None) id_two = FooIdentifier() id_two.identify", "= identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials", "Mock(return_value='two') id_three = FooIdentifier() id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator()", "identifier = BasicAuthIdentifier() credentials = identifier.identify({}) self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self):", "identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def test_skip_if_not_basic(self): identifier = BasicAuthIdentifier() credentials =", "environ = {} middleware(environ, lambda: None) 
self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase):", "identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic \\x000'}) def test_error_if_malformed(self): identifier", "= {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'})", "AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ = {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ)", "app = Mock(return_value=[]) middleware = AuthenticationMiddleware(app, pairs=[(identifier, authenticator)]) environ =", "cellardoor.authentication import * from cellardoor.authentication.basic import BasicAuthIdentifier class FooIdentifier(Identifier): pass", "credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials}) def", "environ = {'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23,", "None, [(None, BarAuthenticator())]) def test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)])", "%s' % credentials}) def test_pass(self): identifier = BasicAuthIdentifier() credentials =", "{'skidoo':23} middleware(environ, lambda: None) identifier.identify.assert_called_once_with(environ) authenticator.authenticate.assert_called_once_with('foo') self.assertEquals(environ, {'skidoo':23, 'cellardoor.identity':'bar'}) def", "id_three.identify = Mock(return_value='three') auth_one = BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two", 
"self.assertEquals(credentials, None) def test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'})", "test_bad_authenticator(self): self.assertRaises(ValueError, AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self): identifier =", "BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic %s' % credentials})", "self.assertEquals(credentials, None) def test_error_if_not_base64(self): identifier = BasicAuthIdentifier() with self.assertRaises(errors.IdentificationError): identifier.identify({'HTTP_AUTHORIZATION':'Basic", "import unittest from mock import Mock import base64 from cellardoor", "\\x000'}) def test_error_if_malformed(self): identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foobar') with", "AuthenticationMiddleware, None, [(FooIdentifier(), None)]) def test_middleware(self): identifier = FooIdentifier() identifier.identify", "= BarAuthenticator() authenticator.authenticate = Mock(return_value='bar') app = Mock(return_value=[]) middleware =", "identifier = BasicAuthIdentifier() credentials = base64.standard_b64encode('foo:bar') identified_credentials = identifier.identify({'HTTP_AUTHORIZATION':'Basic %s'", "= Mock(return_value='three') app = Mock(return_value=[]) middleware = AuthenticationMiddleware( app, pairs=[", "= FooIdentifier() identifier.identify = Mock(return_value='foo') authenticator = BarAuthenticator() authenticator.authenticate =", "class TestAuthentication(unittest.TestCase): def test_abstract_identifier(self): id = Identifier() with self.assertRaises(NotImplementedError): id.identify({})", "test_skip_if_not_a_pair(self): identifier = BasicAuthIdentifier() credentials = identifier.identify({'HTTP_AUTHORIZATION':'Foo'}) self.assertEquals(credentials, None) def", "Mock(return_value=None) id_two = 
FooIdentifier() id_two.identify = Mock(return_value='two') id_three = FooIdentifier()", "BarAuthenticator() auth_one.authenticate = Mock(return_value='one') auth_two = BarAuthenticator() auth_two.authenticate = Mock(return_value='two')", "base64 from cellardoor import errors from cellardoor.authentication import * from", "{} middleware(environ, lambda: None) self.assertEquals(environ, {'cellardoor.identity':'two'}) class TestBasic(unittest.TestCase): def test_skip_if_no_auth_header(self):", "mock import Mock import base64 from cellardoor import errors from", "Mock(return_value='two') auth_three = BarAuthenticator() auth_three.authenticate = Mock(return_value='three') app = Mock(return_value=[])" ]
[]
[ "fast training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8),", "EMA_W = (1 - m) * EMA_W + m *", "m) * EMA_W + m * W momentum=0.9999, warmup='linear', warmup_iters=20", "optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr scheduler lr_config =", "True fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval,", "hooks custom_hooks = [ dict(type='EMAHook', # EMA_W = (1 -", "epochs. update_interval=update_interval, ), ] # optimizer optimizer = dict( type='AdamW',", "# lr = 5e-4 * (256 * 4) * 4", "pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0.,", "# interval for accumulate gradient update_interval = 2 # total:", "top_k=None, beta=8), # AttentiveMix+ in this repo (use pre-trained) automix=dict(mask_adjust=0,", "eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), #", "t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock", "accumulate gradient update_interval = 2 # total: 8 x bs256", "type='ClsMixupHead', # mixup CE + label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000,", "update_interval=update_interval, use_fp16=use_fp16) # lr scheduler lr_config = dict( policy='CosineAnnealing', by_epoch=False,", "bs256 x 2 accumulates = bs4096 # additional hooks custom_hooks", "type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+", "out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', 
#", "use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock ), backbone=dict( type='ConvNeXt',", "lr = 5e-4 * (256 * 4) * 4 accumulate", "= dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) #", "20 epochs. warmup_ratio=1e-6, ) # runtime settings runner = dict(type='EpochBasedRunner',", "optimizer optimizer = dict( type='AdamW', lr=4e-3, # lr = 5e-4", "'../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py', '../../../_base_/default_runtime.py', ] # model settings model = dict( type='MixUpClassification',", "max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size", "accumulates = bs4096 # additional hooks custom_hooks = [ dict(type='EMAHook',", "num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is True in_channels=768, num_classes=1000)", "626, warmup_ratio=0.9, # warmup 20 epochs. update_interval=update_interval, ), ] #", "# warmup 20 epochs. update_interval=update_interval, ), ] # optimizer optimizer", "warmup 20 epochs. 
warmup_ratio=1e-6, ) # runtime settings runner =", "pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in", "dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) # apex use_fp16 = True fp16", "= 5e-4 * (256 * 4) * 4 accumulate /", "type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict(", "4e-3 / bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.),", "alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this", "= dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8),", "paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) # apex use_fp16 =", "EMA_W + m * W momentum=0.9999, warmup='linear', warmup_iters=20 * 626,", "= dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr scheduler lr_config = dict(", "samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock ), backbone=dict( type='ConvNeXt', arch='tiny',", "mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast training beta=1.2,", "# total: 8 x bs256 x 2 accumulates = bs4096", "block_num<=4 and mp=2/4 for fast training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4,", "20 epochs. 
update_interval=update_interval, ), ] # optimizer optimizer = dict(", "betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) # apex", "lam_margin=0), # require pre-trained mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0,", "t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out of memory", "= 2 # total: 8 x bs256 x 2 accumulates", "] # optimizer optimizer = dict( type='AdamW', lr=4e-3, # lr", "dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup 20", "0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock ), backbone=dict(", "drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', # mixup CE + label", "(use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock fmix=dict(decay_power=3, size=(224,224),", "neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require", "head=dict( type='ClsMixupHead', # mixup CE + label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1,", "label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is True in_channels=768,", "# additional hooks custom_hooks = [ dict(type='EMAHook', # EMA_W =", "m * W momentum=0.9999, warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9, #", "by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs. 
warmup_ratio=1e-6,", "scheduler lr_config = dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True,", "gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08),", "this repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock", "memory mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast training", "and mp=2/4 for fast training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3,", "use_fp16=use_fp16) # lr scheduler lr_config = dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5,", "(1 - m) * EMA_W + m * W momentum=0.9999,", "* (256 * 4) * 4 accumulate / 1024 =", "True in_channels=768, num_classes=1000) ) # interval for accumulate gradient update_interval", "# model settings model = dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\",", "# lr scheduler lr_config = dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear',", "attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo (use pre-trained)", "= [ '../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py', '../../../_base_/default_runtime.py', ] # model settings model =", "type='AdamW', lr=4e-3, # lr = 5e-4 * (256 * 4)", "# block_num<=4 and mp=2/4 for fast training beta=1.2, gamma=0.5, eta=0.2,", "with_avg_pool=False, # gap_before_final_norm is True in_channels=768, num_classes=1000) ) # interval", "update_interval = 2 # total: 8 x bs256 x 2", "= True fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None,", "min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs. 
warmup_ratio=1e-6, )", "'(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) # apex use_fp16 = True", ") # interval for accumulate gradient update_interval = 2 #", "in_channels=768, num_classes=1000) ) # interval for accumulate gradient update_interval =", "apex use_fp16 = True fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config", "AttentiveMix+ in this repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require", "additional hooks custom_hooks = [ dict(type='EMAHook', # EMA_W = (1", "}) # apex use_fp16 = True fp16 = dict(type='apex', loss_scale=dict(init_scale=512.,", "# warmup 20 epochs. warmup_ratio=1e-6, ) # runtime settings runner", "beta=8), # AttentiveMix+ in this repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0),", "accumulate / 1024 = 4e-3 / bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9,", "for fast training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1,", "# require pre-trained mixblock ), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d',", "interval for accumulate gradient update_interval = 2 # total: 8", "warmup_ratio=0.9, # warmup 20 epochs. 
update_interval=update_interval, ), ] # optimizer", "* 4 accumulate / 1024 = 4e-3 / bs4096 weight_decay=0.05,", "/ bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias':", "dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr", "weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), })", "lr_config = dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, #", "= bs4096 # additional hooks custom_hooks = [ dict(type='EMAHook', #", "dict(weight_decay=0.), }) # apex use_fp16 = True fp16 = dict(type='apex',", "bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.),", "loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr scheduler", "label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm", "* EMA_W + m * W momentum=0.9999, warmup='linear', warmup_iters=20 *", "# AttentiveMix+ in this repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), #", "= dict( type='AdamW', lr=4e-3, # lr = 5e-4 * (256", "use_fp16 = True fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config =", "_base_ = [ '../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py', '../../../_base_/default_runtime.py', ] # model settings model", "if CUDA out of memory mp=None, block_num=4, # block_num<=4 and", "epochs. 
warmup_ratio=1e-6, ) # runtime settings runner = dict(type='EpochBasedRunner', max_epochs=300)", "# gap_before_final_norm is True in_channels=768, num_classes=1000) ) # interval for", "mp=2/4 for fast training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8),", "gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', # mixup CE + label smooth", "block_num=4, # block_num<=4 and mp=2/4 for fast training beta=1.2, gamma=0.5,", "is True in_channels=768, num_classes=1000) ) # interval for accumulate gradient", "x bs256 x 2 accumulates = bs4096 # additional hooks", "* W momentum=0.9999, warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9, # warmup", "eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) #", "loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is True in_channels=768, num_classes=1000) ) #", "0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)': dict(weight_decay=0.), 'bias': dict(weight_decay=0.), }) # apex use_fp16", "+ label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, #", "total: 8 x bs256 x 2 accumulates = bs4096 #", "dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), #", "+ m * W momentum=0.9999, warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9,", "require pre-trained mixblock ), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6),", "num_classes=1000) ) # interval for accumulate gradient update_interval = 2", "warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs. warmup_ratio=1e-6, ) # runtime", "warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs. 
update_interval=update_interval,", "= [ dict(type='EMAHook', # EMA_W = (1 - m) *", "smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is", "pre-trained mixblock ), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'),", "* 626, warmup_ratio=0.9, # warmup 20 epochs. update_interval=update_interval, ), ]", "puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out of", "CE + label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False,", "adjust t_batch_size if CUDA out of memory mp=None, block_num=4, #", "mixblock ), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1,", "), head=dict( type='ClsMixupHead', # mixup CE + label smooth loss=dict(type='LabelSmoothLoss',", "mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is True in_channels=768, num_classes=1000) )", "gap_before_final_norm is True in_channels=768, num_classes=1000) ) # interval for accumulate", "2 # total: 8 x bs256 x 2 accumulates =", "/ 1024 = 4e-3 / bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999),", "for accumulate gradient update_interval = 2 # total: 8 x", "= dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup", "size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust", "t_batch_size if CUDA out of memory mp=None, block_num=4, # block_num<=4", "'bias': dict(weight_decay=0.), }) # apex use_fp16 = True fp16 =", "automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock fmix=dict(decay_power=3, 
size=(224,224), max_soft=0., reformulate=False),", "lr scheduler lr_config = dict( policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20,", "n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained", "# mixup CE + label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original',", "# require pre-trained mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)),", "beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0,", "W momentum=0.9999, warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20", "policy='CosineAnnealing', by_epoch=False, min_lr=1e-5, warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs.", "5e-4 * (256 * 4) * 4 accumulate / 1024", "warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs. 
update_interval=update_interval, ),", "[ dict(type='EMAHook', # EMA_W = (1 - m) * EMA_W", "require pre-trained mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True,", "in this repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained", "settings model = dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32,", "mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo (use", "t_size=-1, # adjust t_batch_size if CUDA out of memory mp=None,", "act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', # mixup CE +", "optimizer = dict( type='AdamW', lr=4e-3, # lr = 5e-4 *", "- m) * EMA_W + m * W momentum=0.9999, warmup='linear',", "arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead',", "manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA", "out of memory mp=None, block_num=4, # block_num<=4 and mp=2/4 for", "# EMA_W = (1 - m) * EMA_W + m", "dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr scheduler lr_config = dict( policy='CosineAnnealing',", "4) * 4 accumulate / 1024 = 4e-3 / bs4096", "mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1,", "* 4) * 4 accumulate / 1024 = 4e-3 /", "= 4e-3 / bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={ '(bn|ln|gn)(\\d+)?.(weight|bias)':", "dict( type='AdamW', lr=4e-3, # lr = 5e-4 * (256 *", "2 accumulates = bs4096 # additional hooks custom_hooks = [", "x 2 accumulates = bs4096 # 
additional hooks custom_hooks =", "momentum=0.9999, warmup='linear', warmup_iters=20 * 626, warmup_ratio=0.9, # warmup 20 epochs.", "of memory mp=None, block_num=4, # block_num<=4 and mp=2/4 for fast", "warmup 20 epochs. update_interval=update_interval, ), ] # optimizer optimizer =", "eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', # mixup CE", "fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, #", "3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if CUDA out", "lr=4e-3, # lr = 5e-4 * (256 * 4) *", "training beta=1.2, gamma=0.5, eta=0.2, neigh_size=4, n_labels=3, t_eps=0.8), resizemix=dict(scope=(0.1, 0.8), use_alpha=True),", "mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None, beta=8), # AttentiveMix+ in this repo", "model = dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict( attentivemix=dict(grid_size=32, top_k=None,", "mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16) # lr scheduler lr_config", "update_interval=update_interval, ), ] # optimizer optimizer = dict( type='AdamW', lr=4e-3,", "warmup_by_epoch=True, # warmup 20 epochs. 
warmup_ratio=1e-6, ) # runtime settings", "] # model settings model = dict( type='MixUpClassification', pretrained=None, alpha=0.2,", "repo (use pre-trained) automix=dict(mask_adjust=0, lam_margin=0), # require pre-trained mixblock fmix=dict(decay_power=3,", "reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32, t_size=-1, # adjust t_batch_size if", "custom_hooks = [ dict(type='EMAHook', # EMA_W = (1 - m)", "CUDA out of memory mp=None, block_num=4, # block_num<=4 and mp=2/4", "4 accumulate / 1024 = 4e-3 / bs4096 weight_decay=0.05, eps=1e-8,", "1024 = 4e-3 / bs4096 weight_decay=0.05, eps=1e-8, betas=(0.9, 0.999), paramwise_options={", "), ] # optimizer optimizer = dict( type='AdamW', lr=4e-3, #", "warmup='linear', warmup_iters=20, warmup_by_epoch=True, # warmup 20 epochs. warmup_ratio=1e-6, ) #", "# optimizer optimizer = dict( type='AdamW', lr=4e-3, # lr =", "[ '../../../_base_/datasets/imagenet/swin_sz224_4xbs256.py', '../../../_base_/default_runtime.py', ] # model settings model = dict(", "lam_margin=0.08), # require pre-trained mixblock ), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,),", "loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0), with_avg_pool=False, # gap_before_final_norm is True", "fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic')) optimizer_config = dict(grad_clip=None, update_interval=update_interval, use_fp16=use_fp16)", "model settings model = dict( type='MixUpClassification', pretrained=None, alpha=0.2, mix_mode=\"cutmix\", mix_args=dict(", "bs4096 # additional hooks custom_hooks = [ dict(type='EMAHook', # EMA_W", "8 x bs256 x 2 accumulates = bs4096 # additional", "dict(type='EMAHook', # EMA_W = (1 - m) * EMA_W +", "), backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True,", "gradient 
update_interval = 2 # total: 8 x bs256 x", "# apex use_fp16 = True fp16 = dict(type='apex', loss_scale=dict(init_scale=512., mode='dynamic'))", "resizemix=dict(scope=(0.1, 0.8), use_alpha=True), samix=dict(mask_adjust=0, lam_margin=0.08), # require pre-trained mixblock ),", "(256 * 4) * 4 accumulate / 1024 = 4e-3", "pre-trained mixblock fmix=dict(decay_power=3, size=(224,224), max_soft=0., reformulate=False), manifoldmix=dict(layer=(0, 3)), puzzlemix=dict(transport=True, t_batch_size=32,", "backbone=dict( type='ConvNeXt', arch='tiny', out_indices=(3,), norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ),", "'../../../_base_/default_runtime.py', ] # model settings model = dict( type='MixUpClassification', pretrained=None,", "# adjust t_batch_size if CUDA out of memory mp=None, block_num=4,", "= (1 - m) * EMA_W + m * W", "norm_cfg=dict(type='LN2d', eps=1e-6), act_cfg=dict(type='GELU'), drop_path_rate=0.1, gap_before_final_norm=True, ), head=dict( type='ClsMixupHead', # mixup", "mixup CE + label smooth loss=dict(type='LabelSmoothLoss', label_smooth_val=0.1, num_classes=1000, mode='original', loss_weight=1.0)," ]
[ "product_filename is available if os.path.isfile(filename): if force: os.remove(filename) else: raise", "the McStas instr \"\"\" def __init__(self, filename): \"\"\" Initialization of", "already exists, you can overwrite with \" + \"force=True\") self.Reader.generate_py_version(filename)", "McStas_instr instance to add instrument information to \"\"\" # Check", "filename Parameters ---------- filename (str) Name of McStas instrument file", "# Check filename if not os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\"", "python file that reproduces McStas instrument file Parameters ---------- filename", "filename, **kwargs): \"\"\" Writes python file that reproduces McStas instrument", "Filename of python file to be written \"\"\" if \"force\"", "McStas instrument file to be read \"\"\" # Check filename", "McStas files, can add to an existing McStasScript instrument instance", "an existing McStasScript instrument instance or create a corresponding McStasScript", "force = False # Check product_filename is available if os.path.isfile(filename):", "os from mcstasscript.instr_reader.control import InstrumentReader from mcstasscript.interface.instr import McStas_instr class", "that reproduces McStas instrument file Parameters ---------- filename (str) Filename", "to \"\"\" # Check Instr if not isinstance(Instr, McStas_instr): raise", "Write python file named filename that reproduce the McStas instr", "from the McStas file to McStasScript instr Parameters ---------- Instr", "not of type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs): \"\"\"", "Parameters ---------- Instr (McStasScript McStas_instr instance) McStas_instr instance to add", "kwargs: force = kwargs[\"force\"] else: force = False # Check", "instrument file Parameters ---------- filename (str) Filename of python file", "write_python_file(filename) Write python file named filename that reproduce the McStas", "McStas instr \"\"\" def __init__(self, 
filename): \"\"\" Initialization of McStas_file", "of McStas files, can add to an existing McStasScript instrument", "instrument information to \"\"\" # Check Instr if not isinstance(Instr,", "file to McStasScript instr Parameters ---------- Instr (McStasScript McStas_instr instance)", "ValueError(\"Filename \\\"\" + filename + \"\\\" already exists, you can", "filename, \\\"\" + filename + \"\\\" could not be found.\")", "Parameters ---------- filename (str) Filename of python file to be", "raise ValueError(\"Given filename, \\\"\" + filename + \"\\\" could not", "McStas instrument file Parameters ---------- filename (str) Filename of python", "in kwargs: force = kwargs[\"force\"] else: force = False #", "+ \"\\\" already exists, you can overwrite with \" +", "of McStas instrument file to be read \"\"\" # Check", "os.path.isfile(filename): if force: os.remove(filename) else: raise ValueError(\"Filename \\\"\" + filename", "\"\"\" Adds information from the McStas file to McStasScript instr", "read \"\"\" # Check filename if not os.path.isfile(filename): raise ValueError(\"Given", "Add information from McStas file to McStasScript Instr instance write_python_file(filename)", "to an existing McStasScript instrument instance or create a corresponding", "McStasScript python file. 
Methods ------- add_to_instr(Instr) Add information from McStas", "\"\"\" Writes python file that reproduces McStas instrument file Parameters", "\"force\" in kwargs: force = kwargs[\"force\"] else: force = False", "def write_python_file(self, filename, **kwargs): \"\"\" Writes python file that reproduces", "Instr if not isinstance(Instr, McStas_instr): raise TypeError(\"Given object is not", "information from the McStas file to McStasScript instr Parameters ----------", "\"\\\" could not be found.\") self.Reader = InstrumentReader(filename) def add_to_instr(self,", "python file to be written \"\"\" if \"force\" in kwargs:", "add_to_instr(self, Instr): \"\"\" Adds information from the McStas file to", "file Parameters ---------- filename (str) Filename of python file to", "from McStas file to McStasScript Instr instance write_python_file(filename) Write python", "__init__(self, filename): \"\"\" Initialization of McStas_file class, needs McStas instr", "------- add_to_instr(Instr) Add information from McStas file to McStasScript Instr", "\"\"\" Reader of McStas files, can add to an existing", "isinstance(Instr, McStas_instr): raise TypeError(\"Given object is not of type McStas_instr!\")", "---------- Instr (McStasScript McStas_instr instance) McStas_instr instance to add instrument", "of python file to be written \"\"\" if \"force\" in", "filename if not os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\" + filename", "else: raise ValueError(\"Filename \\\"\" + filename + \"\\\" already exists,", "Methods ------- add_to_instr(Instr) Add information from McStas file to McStasScript", "to add instrument information to \"\"\" # Check Instr if", "InstrumentReader from mcstasscript.interface.instr import McStas_instr class McStas_file: \"\"\" Reader of", "Instr (McStasScript McStas_instr instance) McStas_instr instance to add instrument information", "instr filename Parameters ---------- filename (str) Name of McStas instrument", "be written \"\"\" if 
\"force\" in kwargs: force = kwargs[\"force\"]", "instance write_python_file(filename) Write python file named filename that reproduce the", "the McStas file to McStasScript instr Parameters ---------- Instr (McStasScript", "self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs): \"\"\" Writes python file that", "import InstrumentReader from mcstasscript.interface.instr import McStas_instr class McStas_file: \"\"\" Reader", "not os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\" + filename + \"\\\"", "\\\"\" + filename + \"\\\" could not be found.\") self.Reader", "InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\" Adds information from the McStas", "file that reproduces McStas instrument file Parameters ---------- filename (str)", "= False # Check product_filename is available if os.path.isfile(filename): if", "instr Parameters ---------- Instr (McStasScript McStas_instr instance) McStas_instr instance to", "existing McStasScript instrument instance or create a corresponding McStasScript python", "McStas file to McStasScript Instr instance write_python_file(filename) Write python file", "# Check Instr if not isinstance(Instr, McStas_instr): raise TypeError(\"Given object", "reproduce the McStas instr \"\"\" def __init__(self, filename): \"\"\" Initialization", "instr \"\"\" def __init__(self, filename): \"\"\" Initialization of McStas_file class,", "object is not of type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename,", "+ \"\\\" could not be found.\") self.Reader = InstrumentReader(filename) def", "McStas_file: \"\"\" Reader of McStas files, can add to an", "filename + \"\\\" already exists, you can overwrite with \"", "instance or create a corresponding McStasScript python file. 
Methods -------", "if not os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\" + filename +", "if force: os.remove(filename) else: raise ValueError(\"Filename \\\"\" + filename +", "information to \"\"\" # Check Instr if not isinstance(Instr, McStas_instr):", "McStas instr filename Parameters ---------- filename (str) Name of McStas", "\"\"\" if \"force\" in kwargs: force = kwargs[\"force\"] else: force", "class McStas_file: \"\"\" Reader of McStas files, can add to", "found.\") self.Reader = InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\" Adds information", "import McStas_instr class McStas_file: \"\"\" Reader of McStas files, can", "write_python_file(self, filename, **kwargs): \"\"\" Writes python file that reproduces McStas", "False # Check product_filename is available if os.path.isfile(filename): if force:", "create a corresponding McStasScript python file. Methods ------- add_to_instr(Instr) Add", "McStas_instr instance) McStas_instr instance to add instrument information to \"\"\"", "instance) McStas_instr instance to add instrument information to \"\"\" #", "files, can add to an existing McStasScript instrument instance or", "type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs): \"\"\" Writes python", "to McStasScript Instr instance write_python_file(filename) Write python file named filename", "named filename that reproduce the McStas instr \"\"\" def __init__(self,", "Check product_filename is available if os.path.isfile(filename): if force: os.remove(filename) else:", "needs McStas instr filename Parameters ---------- filename (str) Name of", "\"\\\" already exists, you can overwrite with \" + \"force=True\")", "file to McStasScript Instr instance write_python_file(filename) Write python file named", "raise TypeError(\"Given object is not of type McStas_instr!\") self.Reader.add_to_instr(Instr) def", "file to be written \"\"\" if \"force\" in kwargs: force", "written \"\"\" 
if \"force\" in kwargs: force = kwargs[\"force\"] else:", "os.remove(filename) else: raise ValueError(\"Filename \\\"\" + filename + \"\\\" already", "---------- filename (str) Name of McStas instrument file to be", "(str) Name of McStas instrument file to be read \"\"\"", "Check Instr if not isinstance(Instr, McStas_instr): raise TypeError(\"Given object is", "\"\"\" Initialization of McStas_file class, needs McStas instr filename Parameters", "Check filename if not os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\" +", "a corresponding McStasScript python file. Methods ------- add_to_instr(Instr) Add information", "filename (str) Name of McStas instrument file to be read", "if \"force\" in kwargs: force = kwargs[\"force\"] else: force =", "def add_to_instr(self, Instr): \"\"\" Adds information from the McStas file", "be found.\") self.Reader = InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\" Adds", "(McStasScript McStas_instr instance) McStas_instr instance to add instrument information to", "McStas_file class, needs McStas instr filename Parameters ---------- filename (str)", "from mcstasscript.instr_reader.control import InstrumentReader from mcstasscript.interface.instr import McStas_instr class McStas_file:", "or create a corresponding McStasScript python file. Methods ------- add_to_instr(Instr)", "corresponding McStasScript python file. 
Methods ------- add_to_instr(Instr) Add information from", "class, needs McStas instr filename Parameters ---------- filename (str) Name", "mcstasscript.instr_reader.control import InstrumentReader from mcstasscript.interface.instr import McStas_instr class McStas_file: \"\"\"", "McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs): \"\"\" Writes python file", "python file named filename that reproduce the McStas instr \"\"\"", "not isinstance(Instr, McStas_instr): raise TypeError(\"Given object is not of type", "---------- filename (str) Filename of python file to be written", "reproduces McStas instrument file Parameters ---------- filename (str) Filename of", "not be found.\") self.Reader = InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\"", "Parameters ---------- filename (str) Name of McStas instrument file to", "\"\"\" # Check filename if not os.path.isfile(filename): raise ValueError(\"Given filename,", "Instr): \"\"\" Adds information from the McStas file to McStasScript", "McStas_instr): raise TypeError(\"Given object is not of type McStas_instr!\") self.Reader.add_to_instr(Instr)", "to be written \"\"\" if \"force\" in kwargs: force =", "force: os.remove(filename) else: raise ValueError(\"Filename \\\"\" + filename + \"\\\"", "from mcstasscript.interface.instr import McStas_instr class McStas_file: \"\"\" Reader of McStas", "instrument file to be read \"\"\" # Check filename if", "be read \"\"\" # Check filename if not os.path.isfile(filename): raise", "filename + \"\\\" could not be found.\") self.Reader = InstrumentReader(filename)", "+ filename + \"\\\" already exists, you can overwrite with", "filename): \"\"\" Initialization of McStas_file class, needs McStas instr filename", "Instr instance write_python_file(filename) Write python file named filename that reproduce", "of McStas_file class, needs McStas instr filename Parameters ---------- filename", "add instrument information to \"\"\" # 
Check Instr if not", "\"\"\" # Check Instr if not isinstance(Instr, McStas_instr): raise TypeError(\"Given", "add to an existing McStasScript instrument instance or create a", "\\\"\" + filename + \"\\\" already exists, you can overwrite", "of type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs): \"\"\" Writes", "available if os.path.isfile(filename): if force: os.remove(filename) else: raise ValueError(\"Filename \\\"\"", "os.path.isfile(filename): raise ValueError(\"Given filename, \\\"\" + filename + \"\\\" could", "instance to add instrument information to \"\"\" # Check Instr", "ValueError(\"Given filename, \\\"\" + filename + \"\\\" could not be", "Reader of McStas files, can add to an existing McStasScript", "**kwargs): \"\"\" Writes python file that reproduces McStas instrument file", "Adds information from the McStas file to McStasScript instr Parameters", "is available if os.path.isfile(filename): if force: os.remove(filename) else: raise ValueError(\"Filename", "Initialization of McStas_file class, needs McStas instr filename Parameters ----------", "python file. Methods ------- add_to_instr(Instr) Add information from McStas file", "TypeError(\"Given object is not of type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self,", "instrument instance or create a corresponding McStasScript python file. 
Methods", "mcstasscript.interface.instr import McStas_instr class McStas_file: \"\"\" Reader of McStas files,", "that reproduce the McStas instr \"\"\" def __init__(self, filename): \"\"\"", "# Check product_filename is available if os.path.isfile(filename): if force: os.remove(filename)", "\"\"\" def __init__(self, filename): \"\"\" Initialization of McStas_file class, needs", "force = kwargs[\"force\"] else: force = False # Check product_filename", "add_to_instr(Instr) Add information from McStas file to McStasScript Instr instance", "information from McStas file to McStasScript Instr instance write_python_file(filename) Write", "McStas file to McStasScript instr Parameters ---------- Instr (McStasScript McStas_instr", "= kwargs[\"force\"] else: force = False # Check product_filename is", "McStasScript instr Parameters ---------- Instr (McStasScript McStas_instr instance) McStas_instr instance", "Name of McStas instrument file to be read \"\"\" #", "McStasScript instrument instance or create a corresponding McStasScript python file.", "kwargs[\"force\"] else: force = False # Check product_filename is available", "McStas_instr class McStas_file: \"\"\" Reader of McStas files, can add", "could not be found.\") self.Reader = InstrumentReader(filename) def add_to_instr(self, Instr):", "to McStasScript instr Parameters ---------- Instr (McStasScript McStas_instr instance) McStas_instr", "if os.path.isfile(filename): if force: os.remove(filename) else: raise ValueError(\"Filename \\\"\" +", "is not of type McStas_instr!\") self.Reader.add_to_instr(Instr) def write_python_file(self, filename, **kwargs):", "= InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\" Adds information from the", "else: force = False # Check product_filename is available if", "filename (str) Filename of python file to be written \"\"\"", "+ filename + \"\\\" could not be found.\") self.Reader =", "import os from mcstasscript.instr_reader.control import InstrumentReader from 
mcstasscript.interface.instr import McStas_instr", "file. Methods ------- add_to_instr(Instr) Add information from McStas file to", "file named filename that reproduce the McStas instr \"\"\" def", "(str) Filename of python file to be written \"\"\" if", "self.Reader = InstrumentReader(filename) def add_to_instr(self, Instr): \"\"\" Adds information from", "file to be read \"\"\" # Check filename if not", "to be read \"\"\" # Check filename if not os.path.isfile(filename):", "can add to an existing McStasScript instrument instance or create", "McStasScript Instr instance write_python_file(filename) Write python file named filename that", "raise ValueError(\"Filename \\\"\" + filename + \"\\\" already exists, you", "def __init__(self, filename): \"\"\" Initialization of McStas_file class, needs McStas", "filename that reproduce the McStas instr \"\"\" def __init__(self, filename):", "Writes python file that reproduces McStas instrument file Parameters ----------", "if not isinstance(Instr, McStas_instr): raise TypeError(\"Given object is not of" ]
[ "n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional arguments to", "help=\"additional arguments to csolve\") parser.disable_interspersed_args() options, args = parser.parse_args() runner", "# FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES #", "California. All rights reserved. # # Permission is hereby granted,", "WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES", "MODIFICATIONS. import time, subprocess, optparse, sys, socket, os import misc.rtest", "out=None, err=None): print \"exec: \" + \" \".join(args) return subprocess.call(args,", "EVEN # IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED", "+ hygiene_flags + [file], out) out.close() return rv def run_script(file,quiet):", "# ARISING OUT OF THE USE OF THIS SOFTWARE AND", "rights reserved. # # Permission is hereby granted, without written", "and without # license or royalty fees, to use, copy,", "copies of this software. # # IN NO EVENT SHALL", "provided that the # above copyright notice and the following", "(self, testdirs, logfile, threadcount) self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\",", "def solve_quals(file,bare,time,quiet,flags): if quiet: out = null else: out =", "distribute this # software and its documentation for any purpose,", "l = f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else:", "\"AS IS\" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO", "open(\"/dev/null\", \"w\") now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" %", "is hereby granted, without written agreement and without # license", "open(file) l = f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \")", "= \"./csolve -c\".split() null = open(\"/dev/null\", \"w\") now = 
(time.asctime(time.localtime(time.time()))).replace(\"", "= \"//! run with \" def logged_sys_call(args, out=None, err=None): print", "THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY", "f = open(file) l = f.readline() f.close() if l.startswith(argcomment): return", "if quiet: out = null else: out = None return", "POSSIBILITY # OF SUCH DAMAGE. # # THE UNIVERSITY OF", "#testdirs = [(\"../postests\", 0)] #testdirs = [(\"../negtests\", 1)] #testdirs =", "LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL,", "NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND", "INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING OUT OF", "fargs = getfileargs(file) return solve_quals(file, True, False, True, fargs) elif", "NO OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR", "runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount)) exit (runner.run", "rv def run_script(file,quiet): if quiet: out = null else: out", "time = [] hygiene_flags = [(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"]", "self.dargs if file.endswith(\".c\"): fargs = getfileargs(file) return solve_quals(file, True, False,", "argcomment = \"//! run with \" def logged_sys_call(args, out=None, err=None):", "[] class Config (rtest.TestConfig): def __init__ (self, dargs, testdirs, logfile,", "logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now) argcomment = \"//!", "def logged_sys_call(args, out=None, err=None): print \"exec: \" + \" \".join(args)", "TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. import time,", "hereby granted, without written agreement and without # license or", "out = null else: out = None return logged_sys_call(file, out)", "True, False, True, fargs) elif file.endswith(\".sh\"): return run_script(file, True) def", "of the University of California. All rights reserved. 
# #", "parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional arguments to csolve\") parser.disable_interspersed_args()", "WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE.", "in # all copies of this software. # # IN", "PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. import time, subprocess,", "University of California. All rights reserved. # # Permission is", "(file)), \"-o\", \"/dev/null\"] out = open(file + \".log\", \"w\") rv", "OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE. THE", "BE LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL,", "# IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF", "= dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test (self,", "= [(\"../negtests\", 1)] #testdirs = [(\"../slowtests\", 1)] #DEFAULT testdirs =", "testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs =", "any purpose, provided that the # above copyright notice and", "# INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF", "# license or royalty fees, to use, copy, modify, and", "optparse, sys, socket, os import misc.rtest as rtest solve =", "DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING OUT", "if file.endswith(\".c\"): fargs = getfileargs(file) return solve_quals(file, True, False, True,", "null else: out = None return logged_sys_call(file, out) def getfileargs(file):", "# above copyright notice and the following two paragraphs appear", "solve + flags + hygiene_flags + [file], out) out.close() return", "(file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs =", "1)] #testdirs = [(\"../slowtests\", 1)] #DEFAULT testdirs = [(\"../tests/postests\", 
0),", "null = open(\"/dev/null\", \"w\") now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile =", "= optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn n threads\")", "SOFTWARE AND ITS DOCUMENTATION, EVEN # IF THE UNIVERSITY OF", "is_test (self, file): return (file.endswith(\".sh\") and os.access(file, os.X_OK)) \\ or", "copyright notice and the following two paragraphs appear in #", "Permission is hereby granted, without written agreement and without #", "[] hygiene_flags = [(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"] out =", "OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS.", "\"-o\", \"/dev/null\"] out = open(file + \".log\", \"w\") rv =", "dest=\"threadcount\", default=1, type=int, help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\",", "= [] hygiene_flags = [(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"] out", "logfile, threadcount) self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None)", "+ solve + flags + hygiene_flags + [file], out) out.close()", "OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN # IF THE", "os import misc.rtest as rtest solve = \"./csolve -c\".split() null", "BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY #", "(c) 2009 The Regents of the University of California. 
All", "\"/dev/null\"] out = open(file + \".log\", \"w\") rv = logged_sys_call(time", "<reponame>ucsd-progsys/csolve-bak<filename>src/regrtest.py #!/usr/bin/python # Copyright (c) 2009 The Regents of the", "\"w\") now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname", "use, copy, modify, and distribute this # software and its", "return solve_quals(file, True, False, True, fargs) elif file.endswith(\".sh\"): return run_script(file,", "= open(file) l = f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\"", "0)] parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn", "stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out = null else:", "FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING", "not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\", 0)] #testdirs = [(\"../negtests\",", "AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER", "THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION # TO PROVIDE", "# # IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA", "return run_script(file, True) def is_test (self, file): return (file.endswith(\".sh\") and", "sys, socket, os import misc.rtest as rtest solve = \"./csolve", "out = open(file + \".log\", \"w\") rv = logged_sys_call(time +", "this # software and its documentation for any purpose, provided", "TO ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR", "\"w\") rv = logged_sys_call(time + solve + flags + hygiene_flags", "Copyright (c) 2009 The Regents of the University of California.", "FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS #", "quiet: out = null else: out = None if time:", "IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE", "= [(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])] #testdirs = [(\"../tests/microtests\", 0)]", "\" def logged_sys_call(args, out=None, err=None): print \"exec: \" + \"", "[(\"../tests/microtests\", 0)] parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int,", "\"//! run with \" def logged_sys_call(args, out=None, err=None): print \"exec:", "out = None if time: time = [\"time\"] else: time", "\" + \" \".join(args) return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags):", "testdirs, logfile, threadcount) self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"],", "if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else: return [] class Config", "to csolve\") parser.disable_interspersed_args() options, args = parser.parse_args() runner = rtest.TestRunner", "UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY # FOR", "\"./csolve -c\".split() null = open(\"/dev/null\", \"w\") now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\")", "help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional arguments", "file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\", 0)] #testdirs", "\".log\", \"w\") rv = logged_sys_call(time + solve + flags +", "= \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now) argcomment = \"//! run", "hygiene_flags + [file], out) out.close() return rv def run_script(file,quiet): if", "all copies of this software. 
# # IN NO EVENT", "(self, file): return (file.endswith(\".sh\") and os.access(file, os.X_OK)) \\ or (file.endswith(\".c\")", "getfileargs(file): f = open(file) l = f.readline() f.close() if l.startswith(argcomment):", "DAMAGES # ARISING OUT OF THE USE OF THIS SOFTWARE", "CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY # OF SUCH", "OF THE POSSIBILITY # OF SUCH DAMAGE. # # THE", "\"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\",", "reserved. # # Permission is hereby granted, without written agreement", "# THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, #", "the # above copyright notice and the following two paragraphs", "arguments to csolve\") parser.disable_interspersed_args() options, args = parser.parse_args() runner =", "that the # above copyright notice and the following two", "subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out = null", "The Regents of the University of California. All rights reserved.", "if time: time = [\"time\"] else: time = [] hygiene_flags", "without written agreement and without # license or royalty fees,", "OF CALIFORNIA HAS NO OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT,", "= None if time: time = [\"time\"] else: time =", "the University of California. All rights reserved. 
# # Permission", "USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN # IF", "0), (\"../tests/negtests\", [1, 2])] #testdirs = [(\"../tests/microtests\", 0)] parser =", "dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test (self, file):", "solve = \"./csolve -c\".split() null = open(\"/dev/null\", \"w\") now =", "out) out.close() return rv def run_script(file,quiet): if quiet: out =", "PROVIDED HEREUNDER IS # ON AN \"AS IS\" BASIS, AND", "not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\", 0)]", "THE SOFTWARE PROVIDED HEREUNDER IS # ON AN \"AS IS\"", "# # Permission is hereby granted, without written agreement and", "# software and its documentation for any purpose, provided that", "# OF SUCH DAMAGE. # # THE UNIVERSITY OF CALIFORNIA", "#testdirs = [(\"../tests/microtests\", 0)] parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\",", "following two paragraphs appear in # all copies of this", "OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY # OF", "above copyright notice and the following two paragraphs appear in", "of this software. # # IN NO EVENT SHALL THE", "threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs = dargs if", "FITNESS FOR A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS", "# Permission is hereby granted, without written agreement and without", "\\ or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) #####################################################################################", "dest=\"opts\", default=\"\", type=str, help=\"additional arguments to csolve\") parser.disable_interspersed_args() options, args", "ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE IMPLIED", "default=\"\", type=str, help=\"additional arguments to csolve\") parser.disable_interspersed_args() options, args =", "threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional arguments to csolve\")", "parser.parse_args() runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount)) exit", "None return logged_sys_call(file, out) def getfileargs(file): f = open(file) l", "options, args = parser.parse_args() runner = rtest.TestRunner (Config (options.opts, testdirs,", "SOFTWARE PROVIDED HEREUNDER IS # ON AN \"AS IS\" BASIS,", "without # license or royalty fees, to use, copy, modify,", "# AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE PROVIDED", "its documentation for any purpose, provided that the # above", "purpose, provided that the # above copyright notice and the", "def getfileargs(file): f = open(file) l = f.readline() f.close() if", "out = None return logged_sys_call(file, out) def getfileargs(file): f =", "documentation for any purpose, provided that the # above copyright", "CONSEQUENTIAL DAMAGES # ARISING OUT OF THE USE OF THIS", "self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test", "DAMAGE. 
# # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY", "parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\",", "file): return (file.endswith(\".sh\") and os.access(file, os.X_OK)) \\ or (file.endswith(\".c\") and", "type=int, help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional", "rv = logged_sys_call(time + solve + flags + hygiene_flags +", "AN \"AS IS\" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS", "# # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES,", "granted, without written agreement and without # license or royalty", "(file.endswith(\".sh\") and os.access(file, os.X_OK)) \\ or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\")", "\"exec: \" + \" \".join(args) return subprocess.call(args, stdout=out, stderr=err) def", "[(\"../postests\", 0)] #testdirs = [(\"../negtests\", 1)] #testdirs = [(\"../slowtests\", 1)]", "\"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now) argcomment = \"//! run with", "UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION # TO PROVIDE MAINTENANCE,", "out) def getfileargs(file): f = open(file) l = f.readline() f.close()", "optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn n threads\") parser.add_option(\"-o\",", "agreement and without # license or royalty fees, to use,", "time: time = [\"time\"] else: time = [] hygiene_flags =", "copy, modify, and distribute this # software and its documentation", "= (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now)", "[1, 2])] #testdirs = [(\"../tests/microtests\", 0)] parser = optparse.OptionParser() parser.add_option(\"-t\",", "A PARTICULAR PURPOSE. 
THE SOFTWARE PROVIDED HEREUNDER IS # ON", "= open(\"/dev/null\", \"w\") now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\"", "software. # # IN NO EVENT SHALL THE UNIVERSITY OF", "parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1, type=int, help=\"spawn n", "as rtest solve = \"./csolve -c\".split() null = open(\"/dev/null\", \"w\")", "ARISING OUT OF THE USE OF THIS SOFTWARE AND ITS", "None if time: time = [\"time\"] else: time = []", "and its documentation for any purpose, provided that the #", "two paragraphs appear in # all copies of this software.", "(self, file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"): fargs = getfileargs(file)", "return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out =", "THIS SOFTWARE AND ITS DOCUMENTATION, EVEN # IF THE UNIVERSITY", "= parser.parse_args() runner = rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount))", "(), now) argcomment = \"//! run with \" def logged_sys_call(args,", "to use, copy, modify, and distribute this # software and", "[(\"../slowtests\", 1)] #DEFAULT testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])]", "logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs", "MERCHANTABILITY # AND FITNESS FOR A PARTICULAR PURPOSE. THE SOFTWARE", "CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED", "MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. import time, subprocess, optparse,", "INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING OUT OF THE USE", "ON AN \"AS IS\" BASIS, AND THE UNIVERSITY OF CALIFORNIA", "% (file)), \"-o\", \"/dev/null\"] out = open(file + \".log\", \"w\")", "Regents of the University of California. All rights reserved. 
#", "SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY", "= null else: out = None if time: time =", "file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\", 0)] #testdirs = [(\"../negtests\", 1)]", "= logged_sys_call(time + solve + flags + hygiene_flags + [file],", "+ flags + hygiene_flags + [file], out) out.close() return rv", "return l[len(argcomment):].strip().split(\" \") else: return [] class Config (rtest.TestConfig): def", "CALIFORNIA HAS NO OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT, UPDATES,", "quiet: out = null else: out = None return logged_sys_call(file,", "if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test (self, file): os.environ['CSOLVEFLAGS']", "\".join(args) return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out", "of California. All rights reserved. # # Permission is hereby", "type=str, help=\"additional arguments to csolve\") parser.disable_interspersed_args() options, args = parser.parse_args()", "\"init\"], None) def run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs if", "time, subprocess, optparse, sys, socket, os import misc.rtest as rtest", "def __init__ (self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs,", "= null else: out = None return logged_sys_call(file, out) def", "DOCUMENTATION, EVEN # IF THE UNIVERSITY OF CALIFORNIA HAS BEEN", "SUCH DAMAGE. 
# # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS", "run_script(file, True) def is_test (self, file): return (file.endswith(\".sh\") and os.access(file,", "##################################################################################### #testdirs = [(\"../postests\", 0)] #testdirs = [(\"../negtests\", 1)] #testdirs", "False, True, fargs) elif file.endswith(\".sh\"): return run_script(file, True) def is_test", "PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS # ON AN \"AS", "elif file.endswith(\".sh\"): return run_script(file, True) def is_test (self, file): return", "out.close() return rv def run_script(file,quiet): if quiet: out = null", "def run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"): fargs", "ENHANCEMENTS, OR MODIFICATIONS. import time, subprocess, optparse, sys, socket, os", "with \" def logged_sys_call(args, out=None, err=None): print \"exec: \" +", "written agreement and without # license or royalty fees, to", "THE UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING,", "and the following two paragraphs appear in # all copies", "royalty fees, to use, copy, modify, and distribute this #", "os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def run_test (self, file): os.environ['CSOLVEFLAGS'] =", "0)] #testdirs = [(\"../negtests\", 1)] #testdirs = [(\"../slowtests\", 1)] #DEFAULT", "solve_quals(file,bare,time,quiet,flags): if quiet: out = null else: out = None", "[file], out) out.close() return rv def run_script(file,quiet): if quiet: out", "or royalty fees, to use, copy, modify, and distribute this", "open(file + \".log\", \"w\") rv = logged_sys_call(time + solve +", "= f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else: return", "import misc.rtest as rtest solve = \"./csolve -c\".split() null =", "-c\".split() null = open(\"/dev/null\", \"w\") now = 
(time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile", "return logged_sys_call(file, out) def getfileargs(file): f = open(file) l =", "[(\"../negtests\", 1)] #testdirs = [(\"../slowtests\", 1)] #DEFAULT testdirs = [(\"../tests/postests\",", "\"--opts\", dest=\"opts\", default=\"\", type=str, help=\"additional arguments to csolve\") parser.disable_interspersed_args() options,", "LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS", "OF CALIFORNIA BE LIABLE TO ANY PARTY # FOR DIRECT,", "AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION # TO", "software and its documentation for any purpose, provided that the", "if quiet: out = null else: out = None if", "PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES", "modify, and distribute this # software and its documentation for", "time = [\"time\"] else: time = [] hygiene_flags = [(\"--csolveprefix=%s\"", "= [\"time\"] else: time = [] hygiene_flags = [(\"--csolveprefix=%s\" %", "True, fargs) elif file.endswith(\".sh\"): return run_script(file, True) def is_test (self,", "return (file.endswith(\".sh\") and os.access(file, os.X_OK)) \\ or (file.endswith(\".c\") and not", "else: time = [] hygiene_flags = [(\"--csolveprefix=%s\" % (file)), \"-o\",", "THE POSSIBILITY # OF SUCH DAMAGE. # # THE UNIVERSITY", "IS\" BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION", "[(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])] #testdirs = [(\"../tests/microtests\", 0)] parser", "2009 The Regents of the University of California. 
All rights", "+ [file], out) out.close() return rv def run_script(file,quiet): if quiet:", "TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR", "testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])] #testdirs = [(\"../tests/microtests\",", "err=None): print \"exec: \" + \" \".join(args) return subprocess.call(args, stdout=out,", "IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A PARTICULAR", "misc.rtest as rtest solve = \"./csolve -c\".split() null = open(\"/dev/null\",", "getfileargs(file) return solve_quals(file, True, False, True, fargs) elif file.endswith(\".sh\"): return", "THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN #", "else: out = None return logged_sys_call(file, out) def getfileargs(file): f", "logged_sys_call(file, out) def getfileargs(file): f = open(file) l = f.readline()", "stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet: out = null else: out", "default=1, type=int, help=\"spawn n threads\") parser.add_option(\"-o\", \"--opts\", dest=\"opts\", default=\"\", type=str,", "= [(\"../tests/microtests\", 0)] parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\", dest=\"threadcount\", default=1,", "SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
import time, subprocess, optparse, sys,", "def is_test (self, file): return (file.endswith(\".sh\") and os.access(file, os.X_OK)) \\", "# IN NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE", "HAS NO OBLIGATION # TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS,", "OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION, EVEN", "return [] class Config (rtest.TestConfig): def __init__ (self, dargs, testdirs,", "file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"): fargs = getfileargs(file) return", "# Copyright (c) 2009 The Regents of the University of", "dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs", "(\"../tests/negtests\", [1, 2])] #testdirs = [(\"../tests/microtests\", 0)] parser = optparse.OptionParser()", "BASIS, AND THE UNIVERSITY OF CALIFORNIA HAS NO OBLIGATION #", "Config (rtest.TestConfig): def __init__ (self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__", "DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO, THE", "rtest solve = \"./csolve -c\".split() null = open(\"/dev/null\", \"w\") now", "solve_quals(file, True, False, True, fargs) elif file.endswith(\".sh\"): return run_script(file, True)", "args = parser.parse_args() runner = rtest.TestRunner (Config (options.opts, testdirs, logfile,", "for any purpose, provided that the # above copyright notice", "return rv def run_script(file,quiet): if quiet: out = null else:", "= getfileargs(file) return solve_quals(file, True, False, True, fargs) elif file.endswith(\".sh\"):", "# all copies of this software. # # IN NO", "file.endswith(\".sh\"): return run_script(file, True) def is_test (self, file): return (file.endswith(\".sh\")", "% (socket.gethostname (), now) argcomment = \"//! 
run with \"", "AND ITS DOCUMENTATION, EVEN # IF THE UNIVERSITY OF CALIFORNIA", "the following two paragraphs appear in # all copies of", "else: return [] class Config (rtest.TestConfig): def __init__ (self, dargs,", "None) def run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"):", "os.X_OK)) \\ or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\"))", "OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT", "run_script(file,quiet): if quiet: out = null else: out = None", "logged_sys_call(args, out=None, err=None): print \"exec: \" + \" \".join(args) return", "INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY", "class Config (rtest.TestConfig): def __init__ (self, dargs, testdirs, logfile, threadcount):", "= open(file + \".log\", \"w\") rv = logged_sys_call(time + solve", "l[len(argcomment):].strip().split(\" \") else: return [] class Config (rtest.TestConfig): def __init__", "\") else: return [] class Config (rtest.TestConfig): def __init__ (self,", "= None return logged_sys_call(file, out) def getfileargs(file): f = open(file)", "# ON AN \"AS IS\" BASIS, AND THE UNIVERSITY OF", "fargs) elif file.endswith(\".sh\"): return run_script(file, True) def is_test (self, file):", "+ \" \".join(args) return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if", "rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"):", "threadcount) self.dargs = dargs if os.path.exists(\"../tests/postests/coreutils/\"): logged_sys_call([\"../tests/postests/coreutils/makeCoreUtil.sh\", \"init\"], None) def", "UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE POSSIBILITY #", "now = (time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (),", "out = null else: out = None 
if time: time", "SPECIAL, INCIDENTAL, OR CONSEQUENTIAL DAMAGES # ARISING OUT OF THE", "subprocess, optparse, sys, socket, os import misc.rtest as rtest solve", "BEEN ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. #", "OUT OF THE USE OF THIS SOFTWARE AND ITS DOCUMENTATION,", "notice and the following two paragraphs appear in # all", "(rtest.TestConfig): def __init__ (self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self,", "PARTICULAR PURPOSE. THE SOFTWARE PROVIDED HEREUNDER IS # ON AN", "= rtest.TestRunner (Config (options.opts, testdirs, logfile, options.threadcount)) exit (runner.run ())", "OF SUCH DAMAGE. # # THE UNIVERSITY OF CALIFORNIA SPECIFICALLY", "ITS DOCUMENTATION, EVEN # IF THE UNIVERSITY OF CALIFORNIA HAS", "f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else: return [] class", "hygiene_flags = [(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"] out = open(file", "All rights reserved. # # Permission is hereby granted, without", "(self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount)", "print \"exec: \" + \" \".join(args) return subprocess.call(args, stdout=out, stderr=err)", "run with \" def logged_sys_call(args, out=None, err=None): print \"exec: \"", "UPDATES, ENHANCEMENTS, OR MODIFICATIONS. import time, subprocess, optparse, sys, socket,", "NO EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO", "socket, os import misc.rtest as rtest solve = \"./csolve -c\".split()", "[\"time\"] else: time = [] hygiene_flags = [(\"--csolveprefix=%s\" % (file)),", "= [(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"] out = open(file +", "THE IMPLIED WARRANTIES OF MERCHANTABILITY # AND FITNESS FOR A", "#testdirs = [(\"../slowtests\", 1)] #DEFAULT testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\",", "paragraphs appear in # all copies of this software. 
#", "UNIVERSITY OF CALIFORNIA SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT", "[(\"--csolveprefix=%s\" % (file)), \"-o\", \"/dev/null\"] out = open(file + \".log\",", "#DEFAULT testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])] #testdirs =", "and os.access(file, os.X_OK)) \\ or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and", "and not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\",", "fees, to use, copy, modify, and distribute this # software", "EVENT SHALL THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY", "\" \".join(args) return subprocess.call(args, stdout=out, stderr=err) def solve_quals(file,bare,time,quiet,flags): if quiet:", "and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs = [(\"../postests\", 0)] #testdirs =", "this software. # # IN NO EVENT SHALL THE UNIVERSITY", "and distribute this # software and its documentation for any", "ANY PARTY # FOR DIRECT, INDIRECT, SPECIAL, INCIDENTAL, OR CONSEQUENTIAL", "l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else: return [] class Config (rtest.TestConfig):", "(socket.gethostname (), now) argcomment = \"//! run with \" def", "logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile, threadcount) self.dargs = dargs", "HEREUNDER IS # ON AN \"AS IS\" BASIS, AND THE", "import time, subprocess, optparse, sys, socket, os import misc.rtest as", "run_test (self, file): os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"): fargs =", "#!/usr/bin/python # Copyright (c) 2009 The Regents of the University", "f.readline() f.close() if l.startswith(argcomment): return l[len(argcomment):].strip().split(\" \") else: return []", "now) argcomment = \"//! 
run with \" def logged_sys_call(args, out=None,", "file.endswith(\".c\"): fargs = getfileargs(file) return solve_quals(file, True, False, True, fargs)", "appear in # all copies of this software. # #", "ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE. # #", "\",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now) argcomment =", "else: out = None if time: time = [\"time\"] else:", "parser.disable_interspersed_args() options, args = parser.parse_args() runner = rtest.TestRunner (Config (options.opts,", "+ \".log\", \"w\") rv = logged_sys_call(time + solve + flags", "= [(\"../postests\", 0)] #testdirs = [(\"../negtests\", 1)] #testdirs = [(\"../slowtests\",", "= [(\"../slowtests\", 1)] #DEFAULT testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\", [1,", "logged_sys_call(time + solve + flags + hygiene_flags + [file], out)", "(time.asctime(time.localtime(time.time()))).replace(\" \",\"_\") logfile = \"../tests/logs/regrtest_results_%s_%s\" % (socket.gethostname (), now) argcomment", "OR MODIFICATIONS. import time, subprocess, optparse, sys, socket, os import", "True) def is_test (self, file): return (file.endswith(\".sh\") and os.access(file, os.X_OK))", "OR CONSEQUENTIAL DAMAGES # ARISING OUT OF THE USE OF", "2])] #testdirs = [(\"../tests/microtests\", 0)] parser = optparse.OptionParser() parser.add_option(\"-t\", \"--threads\",", "os.access(file, os.X_OK)) \\ or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and not", "license or royalty fees, to use, copy, modify, and distribute", "# TO PROVIDE MAINTENANCE, SUPPORT, UPDATES, ENHANCEMENTS, OR MODIFICATIONS. 
import", "THE UNIVERSITY OF CALIFORNIA BE LIABLE TO ANY PARTY #", "or (file.endswith(\".c\") and not file.endswith(\".csolve.save.c\") and not file.endswith(\".ssa.c\")) ##################################################################################### #testdirs", "os.environ['CSOLVEFLAGS'] = self.dargs if file.endswith(\".c\"): fargs = getfileargs(file) return solve_quals(file,", "csolve\") parser.disable_interspersed_args() options, args = parser.parse_args() runner = rtest.TestRunner (Config", "null else: out = None if time: time = [\"time\"]", "IS # ON AN \"AS IS\" BASIS, AND THE UNIVERSITY", "HAS BEEN ADVISED OF THE POSSIBILITY # OF SUCH DAMAGE.", "flags + hygiene_flags + [file], out) out.close() return rv def", "CALIFORNIA BE LIABLE TO ANY PARTY # FOR DIRECT, INDIRECT,", "#testdirs = [(\"../negtests\", 1)] #testdirs = [(\"../slowtests\", 1)] #DEFAULT testdirs", "def run_script(file,quiet): if quiet: out = null else: out =", "__init__ (self, dargs, testdirs, logfile, threadcount): rtest.TestConfig.__init__ (self, testdirs, logfile,", "IF THE UNIVERSITY OF CALIFORNIA HAS BEEN ADVISED OF THE", "SPECIFICALLY DISCLAIMS ANY WARRANTIES, # INCLUDING, BUT NOT LIMITED TO,", "= self.dargs if file.endswith(\".c\"): fargs = getfileargs(file) return solve_quals(file, True,", "1)] #DEFAULT testdirs = [(\"../tests/postests\", 0), (\"../tests/negtests\", [1, 2])] #testdirs" ]
[ "the capital of: \" + key + \"\\n\").lower() total_questions +=", "####################\"\"\" ####################### ################################################################### # 1. IMPORTS AND README ################################################################### import", "# Short for total_questions = total_questions + 1 if answer", "#score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored \" + str(correct_answers)+ \"/\"", "Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions): score = round(((correct_answers /", "(\" + str(correct_percent) + \"%)\") ################################################################### # 4. TESTING ###################################################################", "= total_questions + 1 if answer == COUNTRIES_CAPITALS[key] or answer.title()", "answer = input(\"Name the capital of: \" + key +", "correct_answers += 1 print(\"Correct!\") else: print(\"Wrong!\") # Should we keep", "\" (\" + str(correct_percent) + \"%)\") ################################################################### # 4. TESTING", "main_question_box(key) # answer = input(\"Name the capital of: \" +", "= 1 correct_answers = 0 total_questions = 0 ask_to_play() while", "= round(((correct_answers / total_questions) * 100), 2) if score >=", "you like to play again?: \\n\") if response.lower() == \"yes\"", "\"France\": \"Paris\"} def test_1(): pass # ask_to_play() # main_question_box(\"Canada\") funtime()", "+ \". Do you want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\",", "# COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United States\": \"Washington\", \"France\": \"Paris\"}", "return easygui.buttonbox(\"Your score: \" + str(score) + \". 
Do you", "return easygui.enterbox(\"What is the capital of: \" + country +", "for key, value in COUNTRIES_CAPITALS.items(): answer = main_question_box(key) # answer", "== \"y\": playing = 1 else: playing = 0 #score_screen(correct_answers,", "States\": \"Washington\", \"France\": \"Paris\"} def test_1(): pass # ask_to_play() #", "COUNTRIES_CAPITALS.items(): answer = main_question_box(key) # answer = input(\"Name the capital", "etc...) and Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def", "like to play again?: \\n\") if response.lower() == \"yes\" or", "\"No\")) def ask_to_replay(correct_answers, total_questions): score = round(((correct_answers / total_questions) *", "if score >= 50: return easygui.buttonbox(\"Your score: \" + str(score)", "return easygui.ynbox(\"Do you want to play a game?\", \"Country Guesser\",", "\"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country): return easygui.enterbox(\"What is the capital", "= 0 ask_to_play() while playing: for key, value in COUNTRIES_CAPITALS.items():", "\" + key + \"\\n\").lower() total_questions += 1 # Short", "+ key + \"\\n\").lower() total_questions += 1 # Short for", "Readme 2. Functions 3. Main 4. 
Testing ####################\"\"\" ####################### ###################################################################", "again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your score: \" +", "[\"Yes\", \"No\"]) def main_question_box(country): return easygui.enterbox(\"What is the capital of:", "playing = 1 correct_answers = 0 total_questions = 0 ask_to_play()", "\"\\n\").lower() total_questions += 1 # Short for total_questions = total_questions", "total_questions) * 100), 2) if score >= 50: return easygui.buttonbox(\"Your", "\"United States\": \"Washington\", \"France\": \"Paris\"} def test_1(): pass # ask_to_play()", "1 correct_answers = 0 total_questions = 0 ask_to_play() while playing:", "if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers +=", "def ask_to_replay(correct_answers, total_questions): score = round(((correct_answers / total_questions) * 100),", "key, value in COUNTRIES_CAPITALS.items(): answer = main_question_box(key) # answer =", "a game?\", \"Country Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions): score", "# Should we keep playing? response = input(\"Would you like", "(Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do", "2) if score >= 50: return easygui.buttonbox(\"Your score: \" +", "\"No\"]) def main_question_box(country): return easygui.enterbox(\"What is the capital of: \"", "input(\"Would you like to play again?: \\n\") if response.lower() ==", "to play again?: \\n\") if response.lower() == \"yes\" or response", "+ str(correct_percent) + \"%)\") ################################################################### # 4. 
TESTING ################################################################### #", "str(total_questions) + \" (\" + str(correct_percent) + \"%)\") ################################################################### #", "while playing: for key, value in COUNTRIES_CAPITALS.items(): answer = main_question_box(key)", "= main_question_box(key) # answer = input(\"Name the capital of: \"", "\" + str(correct_answers)+ \"/\" + str(total_questions) + \" (\" +", "= input(\"Name the capital of: \" + key + \"\\n\").lower()", "1 print(\"Correct!\") else: print(\"Wrong!\") # Should we keep playing? response", "+ str(total_questions) + \" (\" + str(correct_percent) + \"%)\") ###################################################################", "ask_to_play() while playing: for key, value in COUNTRIES_CAPITALS.items(): answer =", "= country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do you want to play", "playing = 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored \"", "COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do you want to", "in COUNTRIES_CAPITALS.items(): answer = main_question_box(key) # answer = input(\"Name the", "import country_list_getter ################################################################### # 2. FUNCTIONS ################################################################### # Dictionary. It", "easygui.ynbox(\"Do you want to play a game?\", \"Country Guesser\", (\"Yes\",", "+ \"%)\") ################################################################### # 4. TESTING ################################################################### # COUNTRIES_CAPITALS =", "################################################################### # 2. FUNCTIONS ################################################################### # Dictionary. It has keys", "################################################################### # 1. 
IMPORTS AND README ################################################################### import easygui import", "country + \"?\", \"Country Capital Guesser!!\") ################################################################### # 3. MAIN", "1 if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers", "100), 2) if score >= 50: return easygui.buttonbox(\"Your score: \"", "ask_to_replay(correct_answers, total_questions) #print(\"You scored \" + str(correct_answers)+ \"/\" + str(total_questions)", "the capital of: \" + country + \"?\", \"Country Capital", "# Dictionary. It has keys (Canada, France etc...) and Values", "####################### \"\"\"#################### Index: 1. Imports and Readme 2. Functions 3.", "total_questions = total_questions + 1 if answer == COUNTRIES_CAPITALS[key] or", "3. MAIN ################################################################### def funtime(): playing = 1 correct_answers =", "Should we keep playing? response = input(\"Would you like to", "ask_to_play(): return easygui.ynbox(\"Do you want to play a game?\", \"Country", "\". 
Do you want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"])", "Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do you", "total_questions + 1 if answer == COUNTRIES_CAPITALS[key] or answer.title() ==", "score = round(((correct_answers / total_questions) * 100), 2) if score", "of: \" + country + \"?\", \"Country Capital Guesser!!\") ###################################################################", "\"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your score: \" + str(score)", "COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United States\": \"Washington\", \"France\": \"Paris\"} def", "score >= 50: return easygui.buttonbox(\"Your score: \" + str(score) +", "Do you want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def", "* 100), 2) if score >= 50: return easygui.buttonbox(\"Your score:", "response = input(\"Would you like to play again?: \\n\") if", "def funtime(): playing = 1 correct_answers = 0 total_questions =", "else: playing = 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored", "if response.lower() == \"yes\" or response == \"y\": playing =", "you want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return", "else: print(\"Wrong!\") # Should we keep playing? response = input(\"Would", "Guesser!!\") ################################################################### # 3. MAIN ################################################################### def funtime(): playing =", "# 4. TESTING ################################################################### # COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United", "print(\"Wrong!\") # Should we keep playing? 
response = input(\"Would you", "[\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your score: \" + str(score) +", "play again?: \\n\") if response.lower() == \"yes\" or response ==", "is the capital of: \" + country + \"?\", \"Country", "correct_answers = 0 total_questions = 0 ask_to_play() while playing: for", "It has keys (Canada, France etc...) and Values (Paris, Ottawa)", "capital of: \" + key + \"\\n\").lower() total_questions += 1", "response.lower() == \"yes\" or response == \"y\": playing = 1", "####################### ################################################################### # 1. IMPORTS AND README ################################################################### import easygui", "scored \" + str(correct_answers)+ \"/\" + str(total_questions) + \" (\"", "# 2. FUNCTIONS ################################################################### # Dictionary. It has keys (Canada,", "str(score) + \". Do you want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\",", "Main 4. Testing ####################\"\"\" ####################### ################################################################### # 1. 
IMPORTS AND", "+ 1 if answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]:", "\"Country Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions): score = round(((correct_answers", "str(correct_answers)+ \"/\" + str(total_questions) + \" (\" + str(correct_percent) +", "total_questions += 1 # Short for total_questions = total_questions +", "(\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions): score = round(((correct_answers / total_questions)", "total_questions): score = round(((correct_answers / total_questions) * 100), 2) if", "want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your", "capital of: \" + country + \"?\", \"Country Capital Guesser!!\")", "\"?\", \"Country Capital Guesser!!\") ################################################################### # 3. MAIN ################################################################### def", "1 else: playing = 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You", "\"Washington\", \"France\": \"Paris\"} def test_1(): pass # ask_to_play() # main_question_box(\"Canada\")", "0 ask_to_play() while playing: for key, value in COUNTRIES_CAPITALS.items(): answer", "str(score) + \". Do you want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\",", "== COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\")", "Imports and Readme 2. Functions 3. Main 4. Testing ####################\"\"\"", "play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your score: \"", "country_list_getter ################################################################### # 2. FUNCTIONS ################################################################### # Dictionary. It has", "\". 
Do you want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"])", "+= 1 print(\"Correct!\") else: print(\"Wrong!\") # Should we keep playing?", "input(\"Name the capital of: \" + key + \"\\n\").lower() total_questions", "= 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored \" +", "0 total_questions = 0 ask_to_play() while playing: for key, value", "+= 1 # Short for total_questions = total_questions + 1", "################################################################### # 4. TESTING ################################################################### # COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\",", "answer == COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers += 1", "Short for total_questions = total_questions + 1 if answer ==", "\"No\"]) else: return easygui.buttonbox(\"Your score: \" + str(score) + \".", "# 1. IMPORTS AND README ################################################################### import easygui import country_list_getter", "easygui import country_list_getter ################################################################### # 2. FUNCTIONS ################################################################### # Dictionary.", "+ country + \"?\", \"Country Capital Guesser!!\") ################################################################### # 3.", "\"Country Capital Guesser!!\") ################################################################### # 3. 
MAIN ################################################################### def funtime():", "== \"yes\" or response == \"y\": playing = 1 else:", "\"y\": playing = 1 else: playing = 0 #score_screen(correct_answers, total_questions)", "\"Ottawa\", \"United States\": \"Washington\", \"France\": \"Paris\"} def test_1(): pass #", "= {\"Canada\": \"Ottawa\", \"United States\": \"Washington\", \"France\": \"Paris\"} def test_1():", "answer.title() == COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\") else: print(\"Wrong!\") #", "\"\"\"#################### Index: 1. Imports and Readme 2. Functions 3. Main", "/ total_questions) * 100), 2) if score >= 50: return", "################################################################### # COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United States\": \"Washington\", \"France\":", "print(\"Correct!\") else: print(\"Wrong!\") # Should we keep playing? response =", "{\"Canada\": \"Ottawa\", \"United States\": \"Washington\", \"France\": \"Paris\"} def test_1(): pass", "# answer = input(\"Name the capital of: \" + key", "= 1 else: playing = 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions)", "want to play a game?\", \"Country Guesser\", (\"Yes\", \"No\")) def", "France etc...) and Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST", "4. Testing ####################\"\"\" ####################### ################################################################### # 1. IMPORTS AND README", "or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\") else: print(\"Wrong!\")", "you want to play a game?\", \"Country Guesser\", (\"Yes\", \"No\"))", "playing? response = input(\"Would you like to play again?: \\n\")", "Functions 3. Main 4. Testing ####################\"\"\" ####################### ################################################################### # 1.", "keys (Canada, France etc...) 
and Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS", "value in COUNTRIES_CAPITALS.items(): answer = main_question_box(key) # answer = input(\"Name", "def main_question_box(country): return easygui.enterbox(\"What is the capital of: \" +", "response == \"y\": playing = 1 else: playing = 0", "+ \" (\" + str(correct_percent) + \"%)\") ################################################################### # 4.", "Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play(): return", "0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored \" + str(correct_answers)+", "\"/\" + str(total_questions) + \" (\" + str(correct_percent) + \"%)\")", "ask_to_replay(correct_answers, total_questions): score = round(((correct_answers / total_questions) * 100), 2)", "play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country): return easygui.enterbox(\"What is", "= 0 total_questions = 0 ask_to_play() while playing: for key,", "again?: \\n\") if response.lower() == \"yes\" or response == \"y\":", "3. Main 4. Testing ####################\"\"\" ####################### ################################################################### # 1. IMPORTS", "2. FUNCTIONS ################################################################### # Dictionary. It has keys (Canada, France", "and Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play():", "for total_questions = total_questions + 1 if answer == COUNTRIES_CAPITALS[key]", "\"yes\" or response == \"y\": playing = 1 else: playing", "has keys (Canada, France etc...) 
and Values (Paris, Ottawa) country_list_getter.main()", "AND README ################################################################### import easygui import country_list_getter ################################################################### # 2.", "score: \" + str(score) + \". Do you want to", "+ \". Do you want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\",", "total_questions) ask_to_replay(correct_answers, total_questions) #print(\"You scored \" + str(correct_answers)+ \"/\" +", "#! /usr/bin/env python3 ####################### \"\"\"#################### Index: 1. Imports and Readme", "playing: for key, value in COUNTRIES_CAPITALS.items(): answer = main_question_box(key) #", "we keep playing? response = input(\"Would you like to play", "import easygui import country_list_getter ################################################################### # 2. FUNCTIONS ################################################################### #", "################################################################### def funtime(): playing = 1 correct_answers = 0 total_questions", "\" + country + \"?\", \"Country Capital Guesser!!\") ################################################################### #", "#print(\"You scored \" + str(correct_answers)+ \"/\" + str(total_questions) + \"", "\"%)\") ################################################################### # 4. TESTING ################################################################### # COUNTRIES_CAPITALS = {\"Canada\":", "easygui.enterbox(\"What is the capital of: \" + country + \"?\",", "python3 ####################### \"\"\"#################### Index: 1. Imports and Readme 2. Functions", "(Canada, France etc...) and Values (Paris, Ottawa) country_list_getter.main() COUNTRIES_CAPITALS =", "Index: 1. Imports and Readme 2. Functions 3. Main 4.", "50: return easygui.buttonbox(\"Your score: \" + str(score) + \". Do", "+ str(score) + \". 
Do you want to play again?\",", "playing = 1 else: playing = 0 #score_screen(correct_answers, total_questions) ask_to_replay(correct_answers,", "= input(\"Would you like to play again?: \\n\") if response.lower()", "or response == \"y\": playing = 1 else: playing =", "IMPORTS AND README ################################################################### import easygui import country_list_getter ################################################################### #", "+ \"?\", \"Country Capital Guesser!!\") ################################################################### # 3. MAIN ###################################################################", "Capital Guesser!!\") ################################################################### # 3. MAIN ################################################################### def funtime(): playing", "key + \"\\n\").lower() total_questions += 1 # Short for total_questions", "you want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country):", "FUNCTIONS ################################################################### # Dictionary. It has keys (Canada, France etc...)", "str(correct_percent) + \"%)\") ################################################################### # 4. TESTING ################################################################### # COUNTRIES_CAPITALS", "COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\") else: print(\"Wrong!\") # Should we", "play a game?\", \"Country Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions):", ">= 50: return easygui.buttonbox(\"Your score: \" + str(score) + \".", "1. Imports and Readme 2. Functions 3. Main 4. 
Testing", "== COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\") else: print(\"Wrong!\") # Should", "country_list_getter.main() COUNTRIES_CAPITALS = country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do you want", "easygui.buttonbox(\"Your score: \" + str(score) + \". Do you want", "MAIN ################################################################### def funtime(): playing = 1 correct_answers = 0", "1 # Short for total_questions = total_questions + 1 if", "Testing ####################\"\"\" ####################### ################################################################### # 1. IMPORTS AND README ###################################################################", "\" + str(score) + \". Do you want to play", "################################################################### # Dictionary. It has keys (Canada, France etc...) and", "funtime(): playing = 1 correct_answers = 0 total_questions = 0", "Dictionary. It has keys (Canada, France etc...) and Values (Paris,", "round(((correct_answers / total_questions) * 100), 2) if score >= 50:", "1. IMPORTS AND README ################################################################### import easygui import country_list_getter ###################################################################", "README ################################################################### import easygui import country_list_getter ################################################################### # 2. FUNCTIONS", "2. Functions 3. Main 4. Testing ####################\"\"\" ####################### ################################################################### #", "/usr/bin/env python3 ####################### \"\"\"#################### Index: 1. Imports and Readme 2.", "+ \"\\n\").lower() total_questions += 1 # Short for total_questions =", "total_questions = 0 ask_to_play() while playing: for key, value in", "4. 
TESTING ################################################################### # COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United States\":", "# 3. MAIN ################################################################### def funtime(): playing = 1 correct_answers", "COUNTRIES_CAPITALS[key] or answer.title() == COUNTRIES_CAPITALS[key]: correct_answers += 1 print(\"Correct!\") else:", "want to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country): return", "Do you want to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else:", "################################################################### import easygui import country_list_getter ################################################################### # 2. FUNCTIONS ###################################################################", "+ str(correct_answers)+ \"/\" + str(total_questions) + \" (\" + str(correct_percent)", "game?\", \"Country Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers, total_questions): score =", "total_questions) #print(\"You scored \" + str(correct_answers)+ \"/\" + str(total_questions) +", "to play again?\", \"~/Documents/ComputerClub/assets/happy_puppy.jpg\", [\"Yes\", \"No\"]) else: return easygui.buttonbox(\"Your score:", "to play again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country): return easygui.enterbox(\"What", "keep playing? 
response = input(\"Would you like to play again?:", "to play a game?\", \"Country Guesser\", (\"Yes\", \"No\")) def ask_to_replay(correct_answers,", "of: \" + key + \"\\n\").lower() total_questions += 1 #", "\\n\") if response.lower() == \"yes\" or response == \"y\": playing", "TESTING ################################################################### # COUNTRIES_CAPITALS = {\"Canada\": \"Ottawa\", \"United States\": \"Washington\",", "country_list_getter.FINAL_LIST def ask_to_play(): return easygui.ynbox(\"Do you want to play a", "main_question_box(country): return easygui.enterbox(\"What is the capital of: \" + country", "answer = main_question_box(key) # answer = input(\"Name the capital of:", "def ask_to_play(): return easygui.ynbox(\"Do you want to play a game?\",", "and Readme 2. Functions 3. Main 4. Testing ####################\"\"\" #######################", "else: return easygui.buttonbox(\"Your score: \" + str(score) + \". Do", "again?\", \"~/Documents/ComputerClub/assets/sad_puppy.jpg\", [\"Yes\", \"No\"]) def main_question_box(country): return easygui.enterbox(\"What is the", "################################################################### # 3. MAIN ################################################################### def funtime(): playing = 1" ]
[ "os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import save_to_json, load_from_json", "break return found @click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information", "def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = rekordbox_file.find('COLLECTION') found = False for", "{\"RekBox\": { \"bpm\": bpm_raw, } } save_to_json(out_file_path, analysis, verbose=True) if", "found = document break return found @click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path):", "unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = rekordbox_file.find('COLLECTION') found = False", "= {\"RekBox\": { \"bpm\": bpm_raw, } } save_to_json(out_file_path, analysis, verbose=True)", "if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1])", "import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = rekordbox_file.find('COLLECTION') found =", "information from rekordbox_rhythm.xml present in dataset_path and convert it into", "stored in the same folder and compatible with our evaluation", "collection = rekordbox_file.find('COLLECTION') found = False for document in collection:", "the same folder and compatible with our evaluation framework. 
\"\"\"", "directory when running outside pycharm import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)),", "entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is not False: tempo_entry", "= load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis = dict()", "= os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis = dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as", "\"bpm\": bpm_raw, } } save_to_json(out_file_path, analysis, verbose=True) if __name__ ==", "to be stored in the same folder and compatible with", "our evaluation framework. \"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file =", "convert it into analsysis_rhythm_rekordbox.json to be stored in the same", "bpm_raw, } } save_to_json(out_file_path, analysis, verbose=True) if __name__ == '__main__':", "to import from parent directory when running outside pycharm import", "os.pardir)) from ac_utils.general import save_to_json, load_from_json import click import xml.etree.ElementTree", "import save_to_json, load_from_json import click import xml.etree.ElementTree from urllib import", "not None: bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0 analysis[key]", "rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from rekordbox_rhythm.xml present in dataset_path and", "if entry is not False: tempo_entry = entry.find('TEMPO') if tempo_entry", "document break return found @click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read", "from ac_utils.general import save_to_json, load_from_json import click import xml.etree.ElementTree from", "and convert it into analsysis_rhythm_rekordbox.json to be stored in the", 
"str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found = document break return found @click.command()", "parent directory when running outside pycharm import os import sys", "= find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is not False: tempo_entry =", "found = False for document in collection: if str(sound_metadata['id']) in", "rekordbox_file) if entry is not False: tempo_entry = entry.find('TEMPO') if", "in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]:", "click import xml.etree.ElementTree from urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file):", "entry is not False: tempo_entry = entry.find('TEMPO') if tempo_entry is", "in dataset_path and convert it into analsysis_rhythm_rekordbox.json to be stored", "found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found =", "is not False: tempo_entry = entry.find('TEMPO') if tempo_entry is not", "str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in", "import click import xml.etree.ElementTree from urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata,", "key in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is", "metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is not False:", "entry.find('TEMPO') if tempo_entry is not None: bpm_raw = float(tempo_entry.attrib['Bpm']) else:", "same folder and compatible with our evaluation framework. 
\"\"\" rekordbox_file", "metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis =", "@click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from rekordbox_rhythm.xml present in", "Read information from rekordbox_rhythm.xml present in dataset_path and convert it", "if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1])", "load_from_json import click import xml.etree.ElementTree from urllib import unquote def", "None: bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0 analysis[key] =", "with our evaluation framework. \"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file", "import from parent directory when running outside pycharm import os", "out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis = dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\")", "float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0 analysis[key] = {\"RekBox\": { \"bpm\":", "def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from rekordbox_rhythm.xml present in dataset_path", "document in collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found = document", "str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in", "os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis = dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys:", "= document break return found @click.command() @click.argument('dataset_path') def 
rekordbox_file_to_analysis_file(dataset_path): \"\"\"", "is not None: bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0", "Need this to import from parent directory when running outside", "for document in collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found =", "in collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found = document break", "into analsysis_rhythm_rekordbox.json to be stored in the same folder and", "break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found = document break if", "import xml.etree.ElementTree from urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection", "from parent directory when running outside pycharm import os import", "in the same folder and compatible with our evaluation framework.", "not False: tempo_entry = entry.find('TEMPO') if tempo_entry is not None:", "# Need this to import from parent directory when running", "tempo_entry = entry.find('TEMPO') if tempo_entry is not None: bpm_raw =", "False: tempo_entry = entry.find('TEMPO') if tempo_entry is not None: bpm_raw", "= xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path,", "it into analsysis_rhythm_rekordbox.json to be stored in the same folder", "import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import save_to_json,", "0.0 analysis[key] = {\"RekBox\": { \"bpm\": bpm_raw, } } save_to_json(out_file_path,", "click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for key in metadata_keys: entry =", "from urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = 
rekordbox_file.find('COLLECTION')", "this to import from parent directory when running outside pycharm", "bpm_raw = 0.0 analysis[key] = {\"RekBox\": { \"bpm\": bpm_raw, }", "'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis", "= False for document in collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]:", "save_to_json, load_from_json import click import xml.etree.ElementTree from urllib import unquote", "document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found", "unquote(document.attrib['Location'].split('/')[-1]): found = document break return found @click.command() @click.argument('dataset_path') def", "xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json')", "= entry.find('TEMPO') if tempo_entry is not None: bpm_raw = float(tempo_entry.attrib['Bpm'])", "{ \"bpm\": bpm_raw, } } save_to_json(out_file_path, analysis, verbose=True) if __name__", "document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found", "rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path =", "found @click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from rekordbox_rhythm.xml", "'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis 
= dict() with click.progressbar(metadata_file.keys(),", "import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import save_to_json, load_from_json import", "tempo_entry is not None: bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw =", "find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = rekordbox_file.find('COLLECTION') found = False for document", "bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0 analysis[key] = {\"RekBox\":", "in unquote(document.attrib['Location'].split('/')[-1]): found = document break return found @click.command() @click.argument('dataset_path')", "break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found = document break return", "dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for key in metadata_keys:", "= document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found = document", "analysis[key] = {\"RekBox\": { \"bpm\": bpm_raw, } } save_to_json(out_file_path, analysis,", "else: bpm_raw = 0.0 analysis[key] = {\"RekBox\": { \"bpm\": bpm_raw,", "pycharm import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import", "= 0.0 analysis[key] = {\"RekBox\": { \"bpm\": bpm_raw, } }", "rekordbox_file): collection = rekordbox_file.find('COLLECTION') found = False for document in", "present in dataset_path and convert it into analsysis_rhythm_rekordbox.json to be", "document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found = document break", "analysis = dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for key", "} } save_to_json(out_file_path, analysis, verbose=True) 
if __name__ == '__main__': rekordbox_file_to_analysis_file()", "sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import save_to_json, load_from_json import click import", "False for document in collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found", "= rekordbox_file.find('COLLECTION') found = False for document in collection: if", "if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found = document break return found", "@click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from rekordbox_rhythm.xml present", "dataset_path and convert it into analsysis_rhythm_rekordbox.json to be stored in", "compatible with our evaluation framework. \"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot()", "\"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path", "return found @click.command() @click.argument('dataset_path') def rekordbox_file_to_analysis_file(dataset_path): \"\"\" Read information from", "load_from_json(os.path.join(dataset_path, 'metadata.json')) out_file_path = os.path.join(dataset_path, 'analysis_rhythm_rekordbox.json') analysis = dict() with", "= document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found = document", "from rekordbox_rhythm.xml present in dataset_path and convert it into analsysis_rhythm_rekordbox.json", "with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for key in metadata_keys: entry", "if tempo_entry is not None: bpm_raw = float(tempo_entry.attrib['Bpm']) else: bpm_raw", "running outside pycharm import os 
import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from", "label=\"Converting...\") as metadata_keys: for key in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key],", "found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]): found =", "rekordbox_rhythm.xml present in dataset_path and convert it into analsysis_rhythm_rekordbox.json to", "outside pycharm import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general", "in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is not", "= dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for key in", "ac_utils.general import save_to_json, load_from_json import click import xml.etree.ElementTree from urllib", "= float(tempo_entry.attrib['Bpm']) else: bpm_raw = 0.0 analysis[key] = {\"RekBox\": {", "when running outside pycharm import os import sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir))", "rekordbox_file.find('COLLECTION') found = False for document in collection: if str(sound_metadata['id'])", "sys sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir)) from ac_utils.general import save_to_json, load_from_json import click", "framework. \"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path, 'metadata.json'))", "'analysis_rhythm_rekordbox.json') analysis = dict() with click.progressbar(metadata_file.keys(), label=\"Converting...\") as metadata_keys: for", "find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry is not False: tempo_entry = entry.find('TEMPO')", "evaluation framework. 
\"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path, 'rekordbox_rhythm.xml')).getroot() metadata_file = load_from_json(os.path.join(dataset_path,", "in document.attrib['Location'].split('/')[-1]: found = document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in unquote(document.attrib['Location'].split('/')[-1]):", "analsysis_rhythm_rekordbox.json to be stored in the same folder and compatible", "metadata_keys: for key in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if", "as metadata_keys: for key in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file)", "\"\"\" Read information from rekordbox_rhythm.xml present in dataset_path and convert", "and compatible with our evaluation framework. \"\"\" rekordbox_file = xml.etree.ElementTree.parse(os.path.join(dataset_path,", "for key in metadata_keys: entry = find_corresponding_rekordbox_entry(metadata_file[key], rekordbox_file) if entry", "folder and compatible with our evaluation framework. \"\"\" rekordbox_file =", "xml.etree.ElementTree from urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection =", "urllib import unquote def find_corresponding_rekordbox_entry(sound_metadata, rekordbox_file): collection = rekordbox_file.find('COLLECTION') found", "document break if str(sound_metadata['wav_sound_path'].split('/')[-1]) in document.attrib['Location'].split('/')[-1]: found = document break", "collection: if str(sound_metadata['id']) in document.attrib['Location'].split('/')[-1]: found = document break if", "be stored in the same folder and compatible with our" ]
[ "'', key) return key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject):", "the Parameter database model \"\"\" URL = 'part/parameter' def getunits(self):", "{ 'part_1': part1, 'part_2': part2, } # Send the data", "import inventree.base import inventree.stock import inventree.company import inventree.build logger =", "'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the InternalPrice model \"\"\"", "**kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent:", "PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class representing the BomItem database model", "PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment, comment=''): \"\"\" Upload an attachment", "the PartCategory database model \"\"\" URL = 'part/category' def getParts(self,", "test_name): \"\"\" Generate a 'key' for this test \"\"\" key", "\"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\" Return the items", "Class representing the InternalPrice model \"\"\" URL = 'part/internal-price' @classmethod", "part \"\"\" return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\" Returns the", "units for this parameter \"\"\" return [element for element in", "\"\"\" Class representing a relationship between parts\"\"\" URL = 'part/related'", "for element in ParameterTemplate.list(self._api) if element['pk'] == self._data['template']] class ParameterTemplate(inventree.base.InventreeObject):", "key = test_name.strip().lower() key = key.replace(' ', '') # Remove", "this part \"\"\" return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\" Return", "items associated with this part \"\"\" return 
inventree.stock.StockItem.list(self._api, part=self.pk) def", "# Remove any characters that cannot be used to represent", "InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity: int, price: float): \"\"\" Set", "# -*- coding: utf-8 -*- import logging import re import", "key.replace(' ', '') # Remove any characters that cannot be", "return None def getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs) def", "def getBuilds(self, **kwargs): \"\"\" Return the builds associated with this", "def getCategory(self): \"\"\" Return the part category associated with this", "between parts\"\"\" URL = 'part/related' @classmethod def add_related(cls, api, part1,", "part2, } # Send the data to the server if", "Return parameters associated with this part \"\"\" return Parameter.list(self._api, part=self.pk)", "def getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment, comment=''): \"\"\"", "price for this part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price)", "\"\"\" return PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\" Return all test", "\"\"\" Return the items required to make this part \"\"\"", "None def getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self,", "return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity: int, price: float): \"\"\"", "class BomItem(inventree.base.InventreeObject): \"\"\" Class representing the BomItem database model \"\"\"", "= ['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a test template", "URL = 'part/related' @classmethod def add_related(cls, api, part1, part2): data", "of all the parts this part is used in \"\"\"", "= 'part/attachment' REQUIRED_KWARGS = ['part'] class 
PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing", "parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable to fetch", "parent categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent)", "def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable to fetch templates for", "(file) against this Part. Args: attachment: Either a string (filename)", "setInternalPrice(self, quantity: int, price: float): \"\"\" Set the internal price", "model \"\"\" URL = 'part/internal-price' @classmethod def setInternalPrice(cls, api, part,", "the data to the server return api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject):", "def isUsedIn(self): \"\"\" Return a list of all the parts", "part \"\"\" return Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\" Return related", "getParameters(self): \"\"\" Return parameters associated with this part \"\"\" return", "this Part. 
Args: attachment: Either a string (filename) or a", "\"\"\" fetch_parent: enable to fetch templates for parent categories \"\"\"", "self.parent) else: return None def getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk,", "ret = False return ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing the", "= logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the PartCategory database", "utf-8 -*- import logging import re import inventree.base import inventree.stock", "getSupplierParts(self): \"\"\" Return the supplier parts associated with this part", "test_name.strip().lower() key = key.replace(' ', '') # Remove any characters", "return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def getAttachments(self): return PartAttachment.list(self._api, part=self.pk)", "getInternalPriceList(self): \"\"\" Returns the InternalPrice list for this part \"\"\"", "return PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\"", "any characters that cannot be used to represent a variable", "representing the Part database model \"\"\" URL = 'part' def", "part1, 'part_2': part2, } # Send the data to the", "\"\"\" Generate a 'key' for this test \"\"\" key =", "Return the items required to make this part \"\"\" return", "stock items associated with this part \"\"\" return inventree.stock.StockItem.list(self._api, part=self.pk)", "\"\"\" URL = 'part/internal-price' @classmethod def setInternalPrice(cls, api, part, quantity:", "getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment, comment=''): \"\"\" Upload", "return PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable", "data to the server if api.post(cls.URL, data): 
logging.info(\"Related OK\") ret", "generateTestKey(cls, test_name): \"\"\" Generate a 'key' for this test \"\"\"", "Set the internal price for this part \"\"\" return InternalPrice.setInternalPrice(self._api,", "Args: attachment: Either a string (filename) or a file object", "\"\"\" URL = 'part/parameter' def getunits(self): \"\"\" Get the dimension", "cannot be used to represent a variable key = re.sub(r'[^a-zA-Z0-9]',", "f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class", "} # Send the data to the server return api.post(cls.URL,", "this test \"\"\" key = test_name.strip().lower() key = key.replace(' ',", "= f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\"", "inventree.base import inventree.stock import inventree.company import inventree.build logger = logging.getLogger('inventree')", "part=self.pk, **kwargs) def getStockItems(self): \"\"\" Return the stock items associated", "= 'part/internal-price' @classmethod def setInternalPrice(cls, api, part, quantity: int, price:", "categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class", "\"\"\"class representing the Parameter database model \"\"\" URL = 'part/parameter'", "**kwargs) def getStockItems(self): \"\"\" Return the stock items associated with", "model \"\"\" URL = 'part/parameter' def getunits(self): \"\"\" Get the", "model \"\"\" URL = 'part' def getCategory(self): \"\"\" Return the", "self._api, attachment, comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\" Class representing", "return [element for element in ParameterTemplate.list(self._api) if element['pk'] == self._data['template']]", 
"key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class representing", "URL = 'part/category' def getParts(self, **kwargs): return Part.list(self._api, category=self.pk, **kwargs)", "Returns the InternalPrice list for this part \"\"\" return InternalPrice.list(self._api,", "self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the Parameter Template database", "return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\" Return the supplier parts", "Class representing a test template for a Part \"\"\" URL", "\"\"\" Return the part category associated with this part \"\"\"", "Class representing the BomItem database model \"\"\" URL = 'bom'", "class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a test template for a", "Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing the Part database model \"\"\"", "= test_name.strip().lower() key = key.replace(' ', '') # Remove any", "return api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a relationship", "True else: logging.warning(\"Related failed\") ret = False return ret class", "else: logging.warning(\"Related failed\") ret = False return ret class Parameter(inventree.base.InventreeObject):", "associated with this part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self):", "that cannot be used to represent a variable key =", "fetch_parent=True): \"\"\" fetch_parent: enable to fetch templates for parent categories", "', '') # Remove any characters that cannot be used", "this part \"\"\" data = { 'part': part, 'quantity': quantity,", "fetch templates for parent categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters' return", "used in \"\"\" return BomItem.list(self._api, 
sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\"", "logging.info(\"Related OK\") ret = True else: logging.warning(\"Related failed\") ret =", "**kwargs) def getParentCategory(self): if self.parent: return PartCategory(self._api, self.parent) else: return", "PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable to", "make this part \"\"\" return BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\"", "\"\"\" return inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\" Return parameters associated", "-*- coding: utf-8 -*- import logging import re import inventree.base", "\"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def getAttachments(self): return PartAttachment.list(self._api,", "= 'part/category' def getParts(self, **kwargs): return Part.list(self._api, category=self.pk, **kwargs) def", "part=self.pk) def uploadAttachment(self, attachment, comment=''): \"\"\" Upload an attachment (file)", "part \"\"\" return BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\" Return a", "'part_2': part2, } # Send the data to the server", "part \"\"\" return inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\" Return parameters", "Part \"\"\" URL = 'part/test-template' @classmethod def generateTestKey(cls, test_name): \"\"\"", "\"\"\" URL = 'part/test-template' @classmethod def generateTestKey(cls, test_name): \"\"\" Generate", "False return ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter database", "URL = 'part' def getCategory(self): \"\"\" Return the part category", "if element['pk'] == self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the", "file object comment: Attachment comment \"\"\" return PartAttachment.upload( self._api, attachment,", 
"sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\" Return the builds associated with", "\"\"\" URL = 'part/attachment' REQUIRED_KWARGS = ['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\"", "element in ParameterTemplate.list(self._api) if element['pk'] == self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\"", "Return the stock items associated with this part \"\"\" return", "this parameter \"\"\" return [element for element in ParameterTemplate.list(self._api) if", "inventree.build logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the", "**kwargs): \"\"\" Return the builds associated with this part \"\"\"", "return Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\" Return related parts associated", "ret = True else: logging.warning(\"Related failed\") ret = False return", "fetch_parent: enable to fetch templates for parent categories \"\"\" parameters_url", "internal price for this part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity,", "def generateTestKey(cls, test_name): \"\"\" Generate a 'key' for this test", "for this part \"\"\" data = { 'part': part, 'quantity':", "database model \"\"\" URL = 'part' def getCategory(self): \"\"\" Return", "getParentCategory(self): if self.parent: return PartCategory(self._api, self.parent) else: return None def", "def uploadAttachment(self, attachment, comment=''): \"\"\" Upload an attachment (file) against", "import inventree.stock import inventree.company import inventree.build logger = logging.getLogger('inventree') class", "with this part \"\"\" return inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\"", "['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a test template for", "with this part \"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def 
getStockItems(self):", "class Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter database model \"\"\" URL", "<filename>inventree/part.py # -*- coding: utf-8 -*- import logging import re", "part \"\"\" return PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\" Return all", "logging import re import inventree.base import inventree.stock import inventree.company import", "\"\"\" return BomItem.list(self._api, sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\" Return the", "InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the InternalPrice model \"\"\" URL =", "used to represent a variable key = re.sub(r'[^a-zA-Z0-9]', '', key)", "the part category associated with this part \"\"\" return PartCategory(self._api,", "Part \"\"\" URL = 'part/attachment' REQUIRED_KWARGS = ['part'] class PartTestTemplate(inventree.base.InventreeObject):", "\"\"\" URL = 'part' def getCategory(self): \"\"\" Return the part", "get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable to fetch templates for parent", "@classmethod def setInternalPrice(cls, api, part, quantity: int, price: float): \"\"\"", "return ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter database model", "Class representing the PartCategory database model \"\"\" URL = 'part/category'", "Remove any characters that cannot be used to represent a", "characters that cannot be used to represent a variable key", "parts\"\"\" URL = 'part/related' @classmethod def add_related(cls, api, part1, part2):", "'quantity': quantity, 'price': price, } # Send the data to", "api.post(cls.URL, data): logging.info(\"Related OK\") ret = True else: logging.warning(\"Related failed\")", "a variable key = re.sub(r'[^a-zA-Z0-9]', '', key) return key def", "Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\" Return related parts associated with", "\"\"\" Class representing a test 
template for a Part \"\"\"", "part1, part2): data = { 'part_1': part1, 'part_2': part2, }", "\"\"\" return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity: int, price: float):", "REQUIRED_KWARGS = ['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a test", "quantity, price) def getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment,", "attachment (file) against this Part. Args: attachment: Either a string", "model \"\"\" URL = 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing", "getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class representing the BomItem", "for this parameter \"\"\" return [element for element in ParameterTemplate.list(self._api)", "representing the BomItem database model \"\"\" URL = 'bom' class", "= 'part/test-template' @classmethod def generateTestKey(cls, test_name): \"\"\" Generate a 'key'", "self.category) def getTestTemplates(self): \"\"\" Return all test templates associated with", "representing the Parameter database model \"\"\" URL = 'part/parameter' def", "for this part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def", "-*- import logging import re import inventree.base import inventree.stock import", "getBomItems(self): \"\"\" Return the items required to make this part", "def getParameters(self): \"\"\" Return parameters associated with this part \"\"\"", "float): \"\"\" Set the internal price for this part \"\"\"", "getTestTemplates(self): \"\"\" Return all test templates associated with this part", "\"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def getStockItems(self): \"\"\" Return the", "InternalPrice model \"\"\" URL = 'part/internal-price' @classmethod def setInternalPrice(cls, api,", "the server return api.post(cls.URL, 
data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing", "this part \"\"\" return PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\" Return", "@classmethod def add_related(cls, api, part1, part2): data = { 'part_1':", "getunits(self): \"\"\" Get the dimension and units for this parameter", "data): logging.info(\"Related OK\") ret = True else: logging.warning(\"Related failed\") ret", "getBuilds(self, **kwargs): \"\"\" Return the builds associated with this part", "\"\"\" Return a list of all the parts this part", "return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\" Returns the InternalPrice list", "the parts this part is used in \"\"\" return BomItem.list(self._api,", "== self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the Parameter Template", "a Part \"\"\" URL = 'part/test-template' @classmethod def generateTestKey(cls, test_name):", "InternalPrice list for this part \"\"\" return InternalPrice.list(self._api, part=self.pk) def", "URL = 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the InternalPrice", "PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a test template for a Part", "PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the PartCategory database model \"\"\" URL", "inventree.build.Build.list(self._api, part=self.pk, **kwargs) def getStockItems(self): \"\"\" Return the stock items", "\"\"\" Get the dimension and units for this parameter \"\"\"", "return PartCategory(self._api, self.parent) else: return None def getChildCategories(self, **kwargs): return", "test template for a Part \"\"\" URL = 'part/test-template' @classmethod", "logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the PartCategory", "# Send the data to the server return api.post(cls.URL, 
data)", "api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a relationship between", "data = { 'part_1': part1, 'part_2': part2, } # Send", "getCategory(self): \"\"\" Return the part category associated with this part", "\"\"\" Upload an attachment (file) against this Part. Args: attachment:", "relationship between parts\"\"\" URL = 'part/related' @classmethod def add_related(cls, api,", "\"\"\" return PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment):", "to make this part \"\"\" return BomItem.list(self._api, part=self.pk) def isUsedIn(self):", "representing a file attachment for a Part \"\"\" URL =", "this part is used in \"\"\" return BomItem.list(self._api, sub_part=self.pk) def", "inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\" Return the items required to", "all test templates associated with this part \"\"\" return PartTestTemplate.list(self._api,", "part, 'quantity': quantity, 'price': price, } # Send the data", "\"\"\" return [element for element in ParameterTemplate.list(self._api) if element['pk'] ==", "variable key = re.sub(r'[^a-zA-Z0-9]', '', key) return key def getTestKey(self):", "coding: utf-8 -*- import logging import re import inventree.base import", "isUsedIn(self): \"\"\" Return a list of all the parts this", "string (filename) or a file object comment: Attachment comment \"\"\"", "ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the Parameter Template database model\"\"\" URL", "list of all the parts this part is used in", "= False return ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter", "part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\" Return the", "ParameterTemplate.list(self._api) if element['pk'] == self._data['template']] class 
ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing", "parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject):", "setInternalPrice(cls, api, part, quantity: int, price: float): \"\"\" Set the", "this part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\" Return", "import inventree.build logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing", "part=self.pk) def setInternalPrice(self, quantity: int, price: float): \"\"\" Set the", "= True else: logging.warning(\"Related failed\") ret = False return ret", "class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the InternalPrice model \"\"\" URL", "items required to make this part \"\"\" return BomItem.list(self._api, part=self.pk)", "URL = 'part/attachment' REQUIRED_KWARGS = ['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class", "the stock items associated with this part \"\"\" return inventree.stock.StockItem.list(self._api,", "Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self): if self.parent: return PartCategory(self._api, self.parent)", "PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\" Class", "self.pk, quantity, price) def getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self,", "key = re.sub(r'[^a-zA-Z0-9]', '', key) return key def getTestKey(self): return", "\"\"\" return Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\" Return related parts", "return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment, comment=''): \"\"\" Upload an", "Get the dimension and units for this 
parameter \"\"\" return", "the internal price for this part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk,", "= 'part' def getCategory(self): \"\"\" Return the part category associated", "price for this part \"\"\" data = { 'part': part,", "import logging import re import inventree.base import inventree.stock import inventree.company", "a list of all the parts this part is used", "BomItem.list(self._api, sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\" Return the builds associated", "server return api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a", "\"\"\" Return all test templates associated with this part \"\"\"", "a file attachment for a Part \"\"\" URL = 'part/attachment'", "\"\"\" Return related parts associated with this part \"\"\" return", "part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\" Class representing a file attachment", "return inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\" Return parameters associated with", "attachment, comment=''): \"\"\" Upload an attachment (file) against this Part.", "Part database model \"\"\" URL = 'part' def getCategory(self): \"\"\"", "api, part, quantity: int, price: float): \"\"\" Set the internal", "Send the data to the server return api.post(cls.URL, data) class", "representing a relationship between parts\"\"\" URL = 'part/related' @classmethod def", "this part \"\"\" return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\" Returns", "element['pk'] == self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the Parameter", "with this part \"\"\" return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\"", "PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\" Return all test templates associated", "def getParts(self, **kwargs): return 
Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self): if", "PartCategory(self._api, self.parent) else: return None def getChildCategories(self, **kwargs): return PartCategory.list(self._api,", "def getParentCategory(self): if self.parent: return PartCategory(self._api, self.parent) else: return None", "def getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True):", "test \"\"\" key = test_name.strip().lower() key = key.replace(' ', '')", "return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class representing the BomItem database", "a 'key' for this test \"\"\" key = test_name.strip().lower() key", "URL = 'part/internal-price' @classmethod def setInternalPrice(cls, api, part, quantity: int,", "add_related(cls, api, part1, part2): data = { 'part_1': part1, 'part_2':", "ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter database model \"\"\"", "the internal price for this part \"\"\" data = {", "\"\"\" Class representing the BomItem database model \"\"\" URL =", "BomItem database model \"\"\" URL = 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\"", "self.parent: return PartCategory(self._api, self.parent) else: return None def getChildCategories(self, **kwargs):", "} # Send the data to the server if api.post(cls.URL,", "'part' def getCategory(self): \"\"\" Return the part category associated with", "failed\") ret = False return ret class Parameter(inventree.base.InventreeObject): \"\"\"class representing", "parts associated with this part \"\"\" return PartRelated.list(self._api, part=self.pk) def", "part category associated with this part \"\"\" return PartCategory(self._api, self.category)", "a file object comment: Attachment comment \"\"\" return PartAttachment.upload( self._api,", "logging.warning(\"Related failed\") ret = 
False return ret class Parameter(inventree.base.InventreeObject): \"\"\"class", "'price': price, } # Send the data to the server", "# Send the data to the server if api.post(cls.URL, data):", "comment=''): \"\"\" Upload an attachment (file) against this Part. Args:", "**kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\" fetch_parent: enable to fetch templates", "'key' for this test \"\"\" key = test_name.strip().lower() key =", "and units for this parameter \"\"\" return [element for element", "\"\"\" key = test_name.strip().lower() key = key.replace(' ', '') #", "'') # Remove any characters that cannot be used to", "this part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def getAttachments(self):", "part \"\"\" return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\" Return the", "the items required to make this part \"\"\" return BomItem.list(self._api,", "this part \"\"\" return BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\" Return", "Parameter(inventree.base.InventreeObject): \"\"\"class representing the Parameter database model \"\"\" URL =", "price) def getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def uploadAttachment(self, attachment, comment=''):", "in \"\"\" return BomItem.list(self._api, sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\" Return", "\"\"\" return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\" Return the supplier", "\"\"\" Returns the InternalPrice list for this part \"\"\" return", "data to the server return api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject): \"\"\"", "part=self.pk) def getInternalPriceList(self): \"\"\" Returns the InternalPrice list for this", "database model \"\"\" URL = 'part/category' def getParts(self, **kwargs): return", "Part. 
Args: attachment: Either a string (filename) or a file", "this part \"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def getStockItems(self): \"\"\"", "part \"\"\" return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity: int, price:", "def getRelated(self): \"\"\" Return related parts associated with this part", "inventree.company import inventree.build logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class", "\"\"\" return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\" Returns the InternalPrice", "part=self.pk) def isUsedIn(self): \"\"\" Return a list of all the", "a test template for a Part \"\"\" URL = 'part/test-template'", "the supplier parts associated with this part \"\"\" return inventree.company.SupplierPart.list(self._api,", "list for this part \"\"\" return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self,", "\"\"\" Set the internal price for this part \"\"\" data", "= 'part/parameter' def getunits(self): \"\"\" Get the dimension and units", "PartAttachment(inventree.base.Attachment): \"\"\" Class representing a file attachment for a Part", "for a Part \"\"\" URL = 'part/attachment' REQUIRED_KWARGS = ['part']", "with this part \"\"\" return Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\"", "part2): data = { 'part_1': part1, 'part_2': part2, } #", "Return the builds associated with this part \"\"\" return inventree.build.Build.list(self._api,", "'part/parameter' def getunits(self): \"\"\" Get the dimension and units for", "for a Part \"\"\" URL = 'part/test-template' @classmethod def generateTestKey(cls,", "associated with this part \"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def", "= 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the InternalPrice model", "getParts(self, **kwargs): return 
Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self): if self.parent:", "this part \"\"\" return Parameter.list(self._api, part=self.pk) def getRelated(self): \"\"\" Return", "def add_related(cls, api, part1, part2): data = { 'part_1': part1,", "with this part \"\"\" return PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\"", ") class PartAttachment(inventree.base.Attachment): \"\"\" Class representing a file attachment for", "fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing the Part database", "comment \"\"\" return PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk ) class", "the Part database model \"\"\" URL = 'part' def getCategory(self):", "the InternalPrice model \"\"\" URL = 'part/internal-price' @classmethod def setInternalPrice(cls,", "enable to fetch templates for parent categories \"\"\" parameters_url =", "def getBomItems(self): \"\"\" Return the items required to make this", "required to make this part \"\"\" return BomItem.list(self._api, part=self.pk) def", "part is used in \"\"\" return BomItem.list(self._api, sub_part=self.pk) def getBuilds(self,", "\"\"\" Return the builds associated with this part \"\"\" return", "else: return None def getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs)", "parts this part is used in \"\"\" return BomItem.list(self._api, sub_part=self.pk)", "def getunits(self): \"\"\" Get the dimension and units for this", "PartCategory database model \"\"\" URL = 'part/category' def getParts(self, **kwargs):", "inventree.base.InventreeObject): \"\"\" Class representing the Part database model \"\"\" URL", "parameters associated with this part \"\"\" return Parameter.list(self._api, part=self.pk) def", "Either a string (filename) or a file object comment: Attachment", "associated with this part \"\"\" return 
inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self):", "**kwargs): return Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self): if self.parent: return", "Return the supplier parts associated with this part \"\"\" return", "re.sub(r'[^a-zA-Z0-9]', '', key) return key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class", "'part/internal-price' @classmethod def setInternalPrice(cls, api, part, quantity: int, price: float):", "part=self.pk) def getSupplierParts(self): \"\"\" Return the supplier parts associated with", "getRelated(self): \"\"\" Return related parts associated with this part \"\"\"", "category=self.pk, **kwargs) def getParentCategory(self): if self.parent: return PartCategory(self._api, self.parent) else:", "\"\"\" Class representing a file attachment for a Part \"\"\"", "part=self.pk) def getParameters(self): \"\"\" Return parameters associated with this part", "\"\"\" Return parameters associated with this part \"\"\" return Parameter.list(self._api,", "PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a relationship between parts\"\"\" URL =", "part \"\"\" data = { 'part': part, 'quantity': quantity, 'price':", "with this part \"\"\" return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\"", "key = key.replace(' ', '') # Remove any characters that", "\"\"\" parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin,", "return BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\" Return a list of", "be used to represent a variable key = re.sub(r'[^a-zA-Z0-9]', '',", "Class representing a file attachment for a Part \"\"\" URL", "object comment: Attachment comment \"\"\" return PartAttachment.upload( self._api, attachment, comment=comment,", "part, quantity: int, price: float): \"\"\" Set the internal 
price", "a Part \"\"\" URL = 'part/attachment' REQUIRED_KWARGS = ['part'] class", "for this part \"\"\" return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity:", "quantity: int, price: float): \"\"\" Set the internal price for", "import re import inventree.base import inventree.stock import inventree.company import inventree.build", "= re.sub(r'[^a-zA-Z0-9]', '', key) return key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name)", "price, } # Send the data to the server return", "\"\"\" Return the stock items associated with this part \"\"\"", "\"\"\" data = { 'part': part, 'quantity': quantity, 'price': price,", "return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def getStockItems(self): \"\"\" Return the stock", "def getInternalPriceList(self): \"\"\" Returns the InternalPrice list for this part", "or a file object comment: Attachment comment \"\"\" return PartAttachment.upload(", "the server if api.post(cls.URL, data): logging.info(\"Related OK\") ret = True", "class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the PartCategory database model \"\"\"", "comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\" Class representing a file", "= { 'part_1': part1, 'part_2': part2, } # Send the", "database model \"\"\" URL = 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class", "Upload an attachment (file) against this Part. 
Args: attachment: Either", "associated with this part \"\"\" return PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self):", "\"\"\" URL = 'part/category' def getParts(self, **kwargs): return Part.list(self._api, category=self.pk,", "category associated with this part \"\"\" return PartCategory(self._api, self.category) def", "\"\"\" Class representing the InternalPrice model \"\"\" URL = 'part/internal-price'", "@classmethod def generateTestKey(cls, test_name): \"\"\" Generate a 'key' for this", "'part/test-template' @classmethod def generateTestKey(cls, test_name): \"\"\" Generate a 'key' for", "OK\") ret = True else: logging.warning(\"Related failed\") ret = False", "Generate a 'key' for this test \"\"\" key = test_name.strip().lower()", "data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a relationship between parts\"\"\"", "\"\"\" class representing the Parameter Template database model\"\"\" URL =", "'part/related' @classmethod def add_related(cls, api, part1, part2): data = {", "'part/category' def getParts(self, **kwargs): return Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self):", "represent a variable key = re.sub(r'[^a-zA-Z0-9]', '', key) return key", "model \"\"\" URL = 'part/category' def getParts(self, **kwargs): return Part.list(self._api,", "the dimension and units for this parameter \"\"\" return [element", "class PartAttachment(inventree.base.Attachment): \"\"\" Class representing a file attachment for a", "Return the part category associated with this part \"\"\" return", "for this test \"\"\" key = test_name.strip().lower() key = key.replace('", "def getSupplierParts(self): \"\"\" Return the supplier parts associated with this", "the builds associated with this part \"\"\" return inventree.build.Build.list(self._api, part=self.pk,", "logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\" Class representing the PartCategory database 
model", "return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\" Return the items required", "server if api.post(cls.URL, data): logging.info(\"Related OK\") ret = True else:", "in ParameterTemplate.list(self._api) if element['pk'] == self._data['template']] class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class", "BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\" Return a list of all", "key) return key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\"", "\"\"\" Class representing the PartCategory database model \"\"\" URL =", "return self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing", "Return a list of all the parts this part is", "representing the InternalPrice model \"\"\" URL = 'part/internal-price' @classmethod def", "def setInternalPrice(cls, api, part, quantity: int, price: float): \"\"\" Set", "data = { 'part': part, 'quantity': quantity, 'price': price, }", "templates for parent categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api,", "import inventree.company import inventree.build logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject): \"\"\"", "getChildCategories(self, **kwargs): return PartCategory.list(self._api, parent=self.pk, **kwargs) def get_category_parameter_templates(self, fetch_parent=True): \"\"\"", "Class representing a relationship between parts\"\"\" URL = 'part/related' @classmethod", "\"\"\" Class representing the Part database model \"\"\" URL =", "'part_1': part1, 'part_2': part2, } # Send the data to", "test templates associated with this part \"\"\" return PartTestTemplate.list(self._api, part=self.pk)", "InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def 
getAttachments(self): return PartAttachment.list(self._api, part=self.pk) def", "parts associated with this part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def", "the InternalPrice list for this part \"\"\" return InternalPrice.list(self._api, part=self.pk)", "to represent a variable key = re.sub(r'[^a-zA-Z0-9]', '', key) return", "to fetch templates for parent categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters'", "dimension and units for this parameter \"\"\" return [element for", "= 'part/related' @classmethod def add_related(cls, api, part1, part2): data =", "templates associated with this part \"\"\" return PartTestTemplate.list(self._api, part=self.pk) def", "api, part1, part2): data = { 'part_1': part1, 'part_2': part2,", "PartRelated.list(self._api, part=self.pk) def getInternalPriceList(self): \"\"\" Returns the InternalPrice list for", "part=self.pk) def getBomItems(self): \"\"\" Return the items required to make", "representing a test template for a Part \"\"\" URL =", "def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class representing the", "an attachment (file) against this Part. Args: attachment: Either a", "(filename) or a file object comment: Attachment comment \"\"\" return", "attachment for a Part \"\"\" URL = 'part/attachment' REQUIRED_KWARGS =", "Parameter database model \"\"\" URL = 'part/parameter' def getunits(self): \"\"\"", "def getTestTemplates(self): \"\"\" Return all test templates associated with this", "against this Part. 
Args: attachment: Either a string (filename) or", "Class representing the Part database model \"\"\" URL = 'part'", "def getStockItems(self): \"\"\" Return the stock items associated with this", "{ 'part': part, 'quantity': quantity, 'price': price, } # Send", "this part \"\"\" return InternalPrice.list(self._api, part=self.pk) def setInternalPrice(self, quantity: int,", "\"\"\" return BomItem.list(self._api, part=self.pk) def isUsedIn(self): \"\"\" Return a list", "url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing the Part", "if self.parent: return PartCategory(self._api, self.parent) else: return None def getChildCategories(self,", "class ParameterTemplate(inventree.base.InventreeObject): \"\"\" class representing the Parameter Template database model\"\"\"", "inventree.stock import inventree.company import inventree.build logger = logging.getLogger('inventree') class PartCategory(inventree.base.InventreeObject):", "Send the data to the server if api.post(cls.URL, data): logging.info(\"Related", "int, price: float): \"\"\" Set the internal price for this", "class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing the Part database model", "file attachment for a Part \"\"\" URL = 'part/attachment' REQUIRED_KWARGS", "the data to the server if api.post(cls.URL, data): logging.info(\"Related OK\")", "'part/attachment' REQUIRED_KWARGS = ['part'] class PartTestTemplate(inventree.base.InventreeObject): \"\"\" Class representing a", "re import inventree.base import inventree.stock import inventree.company import inventree.build logger", "for parent categories \"\"\" parameters_url = f'part/category/{self.pk}/parameters' return self.list(self._api, url=parameters_url,", "PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self): \"\"\" Return the supplier parts associated", "= { 'part': part, 'quantity': quantity, 'price': 
price, } #", "to the server if api.post(cls.URL, data): logging.info(\"Related OK\") ret =", "related parts associated with this part \"\"\" return PartRelated.list(self._api, part=self.pk)", "return key def getTestKey(self): return PartTestTemplate.generateTestKey(self.test_name) class BomItem(inventree.base.InventreeObject): \"\"\" Class", "to the server return api.post(cls.URL, data) class PartRelated(inventree.base.InventreeObject): \"\"\" Class", "'part': part, 'quantity': quantity, 'price': price, } # Send the", "is used in \"\"\" return BomItem.list(self._api, sub_part=self.pk) def getBuilds(self, **kwargs):", "class PartRelated(inventree.base.InventreeObject): \"\"\" Class representing a relationship between parts\"\"\" URL", "= key.replace(' ', '') # Remove any characters that cannot", "inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\" Return parameters associated with this", "return Part.list(self._api, category=self.pk, **kwargs) def getParentCategory(self): if self.parent: return PartCategory(self._api,", "URL = 'part/test-template' @classmethod def generateTestKey(cls, test_name): \"\"\" Generate a", "[element for element in ParameterTemplate.list(self._api) if element['pk'] == self._data['template']] class", "associated with this part \"\"\" return Parameter.list(self._api, part=self.pk) def getRelated(self):", "supplier parts associated with this part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk)", "\"\"\" Set the internal price for this part \"\"\" return", "uploadAttachment(self, attachment, comment=''): \"\"\" Upload an attachment (file) against this", "part=self.pk) def getRelated(self): \"\"\" Return related parts associated with this", "self.list(self._api, url=parameters_url, fetch_parent=fetch_parent) class Part(inventree.base.ImageMixin, inventree.base.InventreeObject): \"\"\" Class representing the", "if api.post(cls.URL, data): logging.info(\"Related OK\") ret = True else: 
logging.warning(\"Related", "Set the internal price for this part \"\"\" data =", "all the parts this part is used in \"\"\" return", "\"\"\" Return the supplier parts associated with this part \"\"\"", "quantity, 'price': price, } # Send the data to the", "getStockItems(self): \"\"\" Return the stock items associated with this part", "this part \"\"\" return inventree.stock.StockItem.list(self._api, part=self.pk) def getParameters(self): \"\"\" Return", "template for a Part \"\"\" URL = 'part/test-template' @classmethod def", "parameter \"\"\" return [element for element in ParameterTemplate.list(self._api) if element['pk']", "a string (filename) or a file object comment: Attachment comment", "with this part \"\"\" return inventree.company.SupplierPart.list(self._api, part=self.pk) def getBomItems(self): \"\"\"", "the BomItem database model \"\"\" URL = 'bom' class InternalPrice(inventree.base.InventreeObject):", "class representing the Parameter Template database model\"\"\" URL = 'part/parameter/template'", "Return related parts associated with this part \"\"\" return PartRelated.list(self._api,", "price: float): \"\"\" Set the internal price for this part", "URL = 'part/parameter' def getunits(self): \"\"\" Get the dimension and", "associated with this part \"\"\" return PartTestTemplate.list(self._api, part=self.pk) def getSupplierParts(self):", "\"\"\" URL = 'bom' class InternalPrice(inventree.base.InventreeObject): \"\"\" Class representing the", "part \"\"\" return InternalPrice.setInternalPrice(self._api, self.pk, quantity, price) def getAttachments(self): return", "internal price for this part \"\"\" data = { 'part':", "a relationship between parts\"\"\" URL = 'part/related' @classmethod def add_related(cls,", "database model \"\"\" URL = 'part/parameter' def getunits(self): \"\"\" Get", "builds associated with this part \"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs)", "attachment: Either a string (filename) or a file object 
comment:", "associated with this part \"\"\" return PartCategory(self._api, self.category) def getTestTemplates(self):", "comment: Attachment comment \"\"\" return PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk", "Attachment comment \"\"\" return PartAttachment.upload( self._api, attachment, comment=comment, part=self.pk )", "attachment, comment=comment, part=self.pk ) class PartAttachment(inventree.base.Attachment): \"\"\" Class representing a", "BomItem(inventree.base.InventreeObject): \"\"\" Class representing the BomItem database model \"\"\" URL", "return PartCategory(self._api, self.category) def getTestTemplates(self): \"\"\" Return all test templates", "part \"\"\" return inventree.build.Build.list(self._api, part=self.pk, **kwargs) def getStockItems(self): \"\"\" Return", "Return all test templates associated with this part \"\"\" return", "representing the PartCategory database model \"\"\" URL = 'part/category' def", "def setInternalPrice(self, quantity: int, price: float): \"\"\" Set the internal", "return BomItem.list(self._api, sub_part=self.pk) def getBuilds(self, **kwargs): \"\"\" Return the builds" ]
[ "assert r.status == 200 await r.release() r = await client.head('/b')", "dir name', 'test dir file .txt', 'test text file folder')", "# Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client =", "('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests", "client.get('/b') assert r.status == 200 await r.release() r = await", "open(my_file_path, 'w') as fw: fw.write('world') app = web.Application() # Register", "== status if data: assert r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_", "async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing non-existing resource Try", "# Register global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client =", "def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the unauthorized access to a", "with open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink')", "handler) client = await aiohttp_client(app) r = await client.get(request_url) assert", "= MagicMock() path.joinpath.return_value = path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect =", "assert data == b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with", "[ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async def", "MyView) ]) client = await aiohttp_client(app) r = await client.get(\"/a\")", "\"\"\" app = web.Application() async def handler(request): return web.Response() app.router.add_get(registered_path,", "as fw: fw.write('world') app = web.Application() # Register global static", "assert r.status == status if data: assert r.headers['Content-Type'] == \"text/html;", "fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path,", "]) client = await 
aiohttp_client(app) r = await client.get(\"/a\") assert", "'/a@b'), ('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\"", "MyView(web.View): async def get(self): return web.Response() async def post(self): return", "r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ]) def test_reuse_last_added_resource(path): \"\"\" Test", "aiohttp_client): \"\"\" Tests the access to a symlink, in static", "global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app)", "assert r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_ = (await r.read()) assert", "path) path.resolve.return_value = path special.resolve.return_value = special path_constructor.return_value = path", "as fw: fw.write(data) app = web.Application() url = os.path.join('/', dir_name,", "test. \"\"\" # Temporary directory. tmp_dir = tempfile.mkdtemp() def teardown():", "('', 'test file.txt', 'test text'), ('test dir name', 'test dir", "def handler(_): return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False,", "we directory index should be shown or not. 
\"\"\" #", "= False path = MagicMock() path.joinpath.side_effect = lambda p: (special", "file.txt', 'test text'), ('test dir name', 'test dir file .txt',", "= (await r.read()) assert read_ == data async def test_follow_symlink(tmp_dir_path,", "file server, and make sure that correct HTTP statuses are", "SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give a path for a", "await client.head('/a') assert r.status == 200 await r.release() r =", "from aiohttp import abc, web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function')", "name=\"a\") assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\")", "read_ == data async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the", "if a special resource is accessed (f.e. named pipe or", "path for a temporary directory The directory is destroyed at", "await aiohttp_client(app) # Request the root of the static directory.", "the root of the static directory. 
r = await client.get('/non_existing_resource')", "'w') as fw: fw.write('world') app = web.Application() # Register global", "= os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as fw: fw.write(data) app", "def get(self): return web.Response() async def post(self): return web.Response() app", "web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client", "access to a symlink, in static folder \"\"\" data =", "app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app) # Request the", "tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the root", "await client.get(prefix) assert r.status == status if data: assert r.headers['Content-Type']", "await client.get(url) assert r.status == 200 assert (await r.text()) ==", "= PermissionError() path_constructor.return_value = path # Register global static route:", "could not be resolved. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path,", "def handler(request): return web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler, name=\"a\")", "r.status == 200 assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [", "web.Application() url = os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client =", "'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path,", "web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client = await aiohttp_client(app) resp =", "file nor a directory. 
Checks that if a special resource", "'test text'), ('test dir name', 'test dir file .txt', 'test", "]) def test_reuse_last_added_resource(path): \"\"\" Test that adding a route with", "= web.Application() async def handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning):", "async def post(self): return web.Response() app = web.Application() app.router.add_routes(routes) client", "return web.Response() app.router.add_get(registered_path, handler) client = await aiohttp_client(app) r =", "app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock()", "app.router.add_routes(routes) client = await aiohttp_client(app) r = await client.get(\"/a\") assert", "200 await r.release() r = await client.get('/b') assert r.status ==", "\"\"\" Test allow_head on routes. \"\"\" app = web.Application() async", "= await client.get('/special') assert r.status == 404 async def test_partialy_applied_handler(aiohttp_client):", "directory. r = await client.get(prefix) assert r.status == status if", "to a resource that is neither a file nor a", "b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n'", "my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as fw: fw.write('hello')", "the app router. \"\"\" app = web.Application() async def async_handler(request):", "await client.head('/b') assert r.status == 405 await r.release() @pytest.mark.parametrize(\"path\", [", "import os import shutil import tempfile from unittest import mock", "= await client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert (await r.text())", "= await client.get(prefix) assert r.status == status if data: assert", "Try to list a folder content of static file server", "of the static directory. 
r = await client.get('/special') assert r.status", "'/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests accessing", "os.symlink(my_dir_path, my_symlink_path) app = web.Application() # Register global static route:", "os import shutil import tempfile from unittest import mock from", "= await aiohttp_client(app) # Request the root of the static", "pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client = await aiohttp_client(app) r", "os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path =", "r.status == 200 await r.release() r = await client.post(\"/a\") assert", "aiohttp_client(app) r = await client.get('/') data = (await r.read()) assert", "file server. Try to list a folder content of static", "= await client.get(\"/a\") assert r.status == 200 await r.release() r", "os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with", "return web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler) with", "shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403, '/', None,", "async def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests accessing a resource", "'my_file') with open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path,", "to access the root of static file server, and make", "== 412 async def test_allow_head(aiohttp_client): \"\"\" Test allow_head on routes.", "web.Application() app.router.add_routes(routes) client = await aiohttp_client(app) r = await client.get(\"/a\")", "path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value = path # Register global", "Put a file inside 
tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with", "== data async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the access", "None assert \"<SystemRoute 201: test>\" == repr(route) assert 201 ==", "def teardown(): # Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return", "os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write('world') app =", "id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n'", "do so for the folder. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir')", "await aiohttp_client(app) r = await client.get('/') data = (await r.read())", "assert route.name is None assert route.resource is None assert \"<SystemRoute", "# Put a file inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file')", "await client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert (await r.text()) ==", "to a looped symlink, which could not be resolved. 
\"\"\"", "data = (await r.read()) assert data == b'hello' def test_system_route():", "folder \"\"\" data = 'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir')", "= (await r.read()) assert data == b'hello' def test_system_route(): route", "== b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for()", "resp.status == 412 async def test_allow_head(aiohttp_client): \"\"\" Test allow_head on", "assert len(app.router.resources()) == 1 def test_resource_raw_match(): app = web.Application() async", "= os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path,", "fw: fw.write('world') app = web.Application() # Register global static route:", "b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status,", "tmp_dir = tempfile.mkdtemp() def teardown(): # Delete the whole directory:", "the unauthorized access to a folder of static file server.", "async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a", "that is neither a file nor a directory. Checks that", "access to a looped symlink, which could not be resolved.", "await aiohttp_client(app) r = await client.get('/a') assert r.status == 200", "r.release() r = await client.get('/b') assert r.status == 200 await", "and make sure that 404 HTTP status returned. \"\"\" app", "web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ]) client = await aiohttp_client(app) r", "resource. 
\"\"\" app = web.Application() async def handler(request): return web.Response()", "await client.get('/') data = (await r.read()) assert data == b'hello'", "fw.write('world') app = web.Application() # Register global static route: app.router.add_static(prefix,", "route in resource: assert route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path,", "are returned depending if we directory index should be shown", "def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing non-existing resource Try to", "def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self, request): raise web.HTTPPreconditionFailed()", "aiohttp import abc, web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def", "= await client.head('/a') assert r.status == 200 await r.release() r", "r = await client.get('/special') assert r.status == 404 async def", "is neither a file nor a directory. 
Checks that if", "folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): \"\"\"", "= web.Application() # Register global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index)", "handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello'))", "open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path)", "= SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name is None assert", "'my_dir') os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path", "path # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client", "\"\"\" Checks operation of static files with spaces \"\"\" my_dir_path", "pipe or UNIX domain socket) then 404 HTTP status returned.", "sure that correct HTTP statuses are returned depending if we", "= await client.get('/b') assert r.status == 200 await r.release() r", "== 200 await r.release() r = await client.head('/b') assert r.status", "assert r.status == 200 await r.release() r = await client.post(\"/a\")", "412 async def test_allow_head(aiohttp_client): \"\"\" Test allow_head on routes. \"\"\"", "path.joinpath.side_effect = lambda p: (special if p == 'special' else", "root of the static directory. 
r = await client.get(prefix) assert", "dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path,", "resource with \"\"\" app = web.Application() async def handler(request): return", "assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'),", "app = web.Application() url = os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path)", "from unittest import mock from unittest.mock import MagicMock import pytest", "of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>',", "resource is accessed (f.e. named pipe or UNIX domain socket)", "unittest import mock from unittest.mock import MagicMock import pytest from", "client.get(request_url) assert r.status == 200 async def test_handler_metadata_persistence(): \"\"\" Tests", "b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of", "HTTP statuses are returned depending if we directory index should", "= await client.head('/b') assert r.status == 405 await r.release() @pytest.mark.parametrize(\"path\",", "import mock from unittest.mock import MagicMock import pytest from aiohttp", "teardown(): # Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir", "resource doesn't create a new resource. 
\"\"\" app = web.Application()", "await client.get('/special') assert r.status == 404 async def test_partialy_applied_handler(aiohttp_client): app", "def handler(request): return web.Response() route = app.router.add_get(\"/a\", handler, name=\"a\") assert", "return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource", "with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client = await aiohttp_client(app)", "= MagicMock() path.joinpath.side_effect = lambda p: (special if p ==", "my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write(data)", "r.read()) assert read_ == data async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\"", "== 200 await r.release() r = await client.get('/b') assert r.status", "app = web.Application() async def handler(request): return web.Response() route =", "web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async def get(self): return web.Response() async", "test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async def get(self):", "r.release() async def test_web_view(aiohttp_client): app = web.Application() class MyView(web.View): async", "root of static file server, and make sure that correct", "import MagicMock import pytest from aiohttp import abc, web from", "aiohttp_client): \"\"\" Tests accessing non-existing resource Try to access a", "\"\"\" Tests accessing metadata of a handler after registering it", "route.status assert 'test' == route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter):", "await client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'),", "'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 
'my_file_in_dir') with open(my_file_path, 'w') as", "that 404 HTTP status returned. \"\"\" app = web.Application() #", "app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app =", "\"\"\" app = web.Application() async def handler(request): return web.Response() app.router.add_get(path,", "from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give a", "client.put(\"/a\") assert r.status == 405 await r.release() async def test_web_view(aiohttp_client):", "looped symlink, which could not be resolved. \"\"\" my_dir_path =", "to do so for the folder. \"\"\" my_dir_path = os.path.join(tmp_dir_path,", "then 404 HTTP status returned. \"\"\" app = web.Application() with", "Tests the operation of static file server. Try to access", "static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app) #", "not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app = web.Application() class MyView(web.View):", "accessing non-existing resource Try to access a non-exiting resource and", "static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await aiohttp_client(app) #", "aiohttp_client, dir_name, filename, data): \"\"\" Checks operation of static files", "404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ])", "'/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a", "be shown or not. \"\"\" # Put a file inside", "static directory. 
r = await client.get('/special') assert r.status == 404", "get(self): return web.Response() async def post(self): return web.Response() app =", "await r.release() r = await client.post(\"/a\") assert r.status == 200", "= await client.get('/my_dir') assert r.status == 403 async def test_access_symlink_loop(tmp_dir_path,", "is None assert \"<SystemRoute 201: test>\" == repr(route) assert 201", "data: assert r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_ = (await r.read())", "to a symlink, in static folder \"\"\" data = 'hello", "name='b') client = await aiohttp_client(app) r = await client.get('/a') assert", "r.text()) == data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing", "== data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test text'), ('test", "assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource", "routes = web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async def get(self): return", "= path special.resolve.return_value = special path_constructor.return_value = path # Register", "assert r.status == 404 async def test_partialy_applied_handler(aiohttp_client): app = web.Application()", "route with the same name and path of the last", "return web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources())", "== 1 def test_resource_raw_match(): app = web.Application() async def handler(request):", "r = await client.post(\"/a\") assert r.status == 200 await r.release()", "server, and make sure that correct HTTP statuses are returned", "the static directory. 
r = await client.get('/my_symlink') assert r.status ==", "file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data):", "PermissionError() path_constructor.return_value = path # Register global static route: app.router.add_static('/',", "a special resource is accessed (f.e. named pipe or UNIX", "else path) path.resolve.return_value = path special.resolve.return_value = special path_constructor.return_value =", "async def test_partialy_applied_handler(aiohttp_client): app = web.Application() async def handler(data, request):", "prefix, data): \"\"\" Tests the operation of static file server.", "added resource doesn't create a new resource. \"\"\" app =", "that correct HTTP statuses are returned depending if we directory", "await client.get(\"/a\") assert r.status == 200 await r.release() r =", "allow_head on routes. \"\"\" app = web.Application() async def handler(_):", "= await aiohttp_client(app) r = await client.get(\"/a\") assert r.status ==", "or not. \"\"\" # Put a file inside tmp_dir_path: my_file_path", "filename) with open(my_file_path, 'w') as fw: fw.write(data) app = web.Application()", "fw: fw.write(data) app = web.Application() url = os.path.join('/', dir_name, filename)", "static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) #", "= os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app)", "('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path, request_url):", "symlink, which could not be resolved. \"\"\" my_dir_path = os.path.join(tmp_dir_path,", "make sure that 404 HTTP status returned. \"\"\" app =", "or UNIX domain socket) then 404 HTTP status returned. 
\"\"\"", "web.Response() async def post(self): return web.Response() app.router.add_view(\"/a\", MyView) client =", "def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data): \"\"\" Tests the", "static file server. Try to list a folder content of", "async def post(self): return web.Response() app.router.add_view(\"/a\", MyView) client = await", "\"\"\" app = web.Application() async def async_handler(request): \"\"\"Doc\"\"\" return web.Response()", "with open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir')", "tempfile.mkdtemp() def teardown(): # Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown)", "MyRouter(abc.AbstractRouter): async def resolve(self, request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter())", "= 'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path =", "resp = await client.get('/') assert resp.status == 412 async def", "= await client.get('/a') assert r.status == 200 await r.release() r", "r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_ = (await r.read()) assert read_", "[ '/a', '/{a}', ]) def test_reuse_last_added_resource(path): \"\"\" Test that adding", "route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\") async def", "404 async def test_partialy_applied_handler(aiohttp_client): app = web.Application() async def handler(data,", "tmp_dir_path(request): \"\"\" Give a path for a temporary directory The", "my_symlink_path) app = web.Application() # Register global static route: app.router.add_static('/',", "[pytest.param(False, 403, '/', None, id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of", "root of the static directory. 
r = await client.get('/my_symlink/my_file_in_dir') assert", "\"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application()", "def test_handler_metadata_persistence(): \"\"\" Tests accessing metadata of a handler after", "b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert", "os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path =", "await client.put(\"/a\") assert r.status == 405 await r.release() async def", "(await r.text()) == data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests", "Try to access a non-exiting resource and make sure that", "import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give a path for", "pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a", "= await client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b',", "access a non-exiting resource and make sure that 404 HTTP", "a resource that is neither a file nor a directory.", "app.router.add_get(registered_path, handler) client = await aiohttp_client(app) r = await client.get(request_url)", "= False special.is_file.return_value = False path = MagicMock() path.joinpath.side_effect =", "MagicMock() path.joinpath.side_effect = lambda p: (special if p == 'special'", "await aiohttp_client(app) resp = await client.get('/') assert resp.status == 412", "os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path =", "route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app) # Request", "root of the static directory. 
r = await client.get('/non_existing_resource') assert", "client.get('/') assert resp.status == 412 async def test_allow_head(aiohttp_client): \"\"\" Test", "os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application() # Register global", "== 200 await r.release() r = await client.head('/a') assert r.status", "assert route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests", "return web.Response() async def post(self): return web.Response() app = web.Application()", "and make sure that correct HTTP statuses are returned depending", "resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app = web.Application() class MyView(web.View): async", "with the same name and path of the last added", "route.resource is None assert \"<SystemRoute 201: test>\" == repr(route) assert", "href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index,", "'/', None, id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index", "async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a", "app router. \"\"\" app = web.Application() async def async_handler(request): \"\"\"Doc\"\"\"", "p == 'special' else path) path.resolve.return_value = path special.resolve.return_value =", "name and path of the last added resource doesn't create", "index should be shown or not. 
\"\"\" # Put a", "as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app", "MagicMock import pytest from aiohttp import abc, web from aiohttp.web_urldispatcher", "for resource in app.router.resources(): for route in resource: assert route.handler.__doc__", "# Register global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client =", "dir_name, filename) app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app) r =", "client.get('/a') assert r.status == 200 await r.release() r = await", "files with spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name:", "def test_allow_head(aiohttp_client): \"\"\" Test allow_head on routes. \"\"\" app =", "assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\") async", "status, prefix, data): \"\"\" Tests the operation of static file", "in static folder \"\"\" data = 'hello world' my_dir_path =", "dir file .txt', 'test text file folder') ]) async def", "static directory. r = await client.get('/non_existing_resource') assert r.status == 404", "app = web.Application() async def handler(data, request): return web.Response(body=data) with", "app.router.add_get('/sync', sync_handler) for resource in app.router.resources(): for route in resource:", "'w') as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path", "accessing metadata of a handler after registering it on the", "the root of the static directory. 
r = await client.get('/my_dir')", "test_web_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self): return", "get(self): return web.Response() async def post(self): return web.Response() app.router.add_view(\"/a\", MyView)", "my_dir_path) app = web.Application() # Register global static route: app.router.add_static('/',", "tmp_dir_path) client = await aiohttp_client(app) r = await client.get(url) assert", "the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False,", "\"text/html; charset=utf-8\" read_ = (await r.read()) assert read_ == data", "data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test text'), ('test dir", "web.Application() async def async_handler(request): \"\"\"Doc\"\"\" return web.Response() def sync_handler(request): \"\"\"Doc\"\"\"", "return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client", "b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client,", "405 await r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ]) def test_reuse_last_added_resource(path):", "handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\") assert not", "async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data): \"\"\" Tests", "client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert (await r.text()) == data", "def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): \"\"\" Checks operation of", "test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the access 
to a symlink, in", "web.Application() async def handler(request): return web.Response() app.router.add_get(registered_path, handler) client =", "b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n'", "to access a non-exiting resource and make sure that 404", "== 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the access", "of the static directory. r = await client.get('/my_symlink/my_file_in_dir') assert r.status", "False special.is_file.return_value = False path = MagicMock() path.joinpath.side_effect = lambda", "= lambda p: (special if p == 'special' else path)", "= await client.post(\"/a\") assert r.status == 200 await r.release() r", "\"<SystemRoute 201: test>\" == repr(route) assert 201 == route.status assert", "web.Application() async def handler(_): return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b',", "Temporary directory. tmp_dir = tempfile.mkdtemp() def teardown(): # Delete the", "path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value = path # Register global static", "# Request the root of the static directory. r =", "Tests the access to a looped symlink, which could not", "200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n'", "text'), ('test dir name', 'test dir file .txt', 'test text", "= tempfile.mkdtemp() def teardown(): # Delete the whole directory: shutil.rmtree(tmp_dir)", "web.Response() app.router.add_get(registered_path, handler) client = await aiohttp_client(app) r = await", "a file nor a directory. 
Checks that if a special", "web.Application(router=MyRouter()) client = await aiohttp_client(app) resp = await client.get('/') assert", "data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing non-existing resource", "None assert route.resource is None assert \"<SystemRoute 201: test>\" ==", "app = web.Application() app.router.add_routes(routes) client = await aiohttp_client(app) r =", "static file server. Try to access the root of static", "unittest.mock import MagicMock import pytest from aiohttp import abc, web", "await client.get('/my_dir') assert r.status == 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client):", "status returned. \"\"\" app = web.Application() # Register global static", "tempfile from unittest import mock from unittest.mock import MagicMock import", "= await client.get('/') assert resp.status == 412 async def test_allow_head(aiohttp_client):", "assert r.status == 200 await r.release() r = await client.get('/b')", "len(app.router.resources()) == 1 def test_resource_raw_match(): app = web.Application() async def", "b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index", "of the static directory. r = await client.get('/my_symlink') assert r.status", "web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources()) ==", "accessed (f.e. named pipe or UNIX domain socket) then 404", "route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource =", "handler after registering it on the app router. 
\"\"\" app", "'w') as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path)", "/.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path,", "my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as fw: fw.write(data)", "'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application() # Register global static", "= web.Application() async def handler(request): return web.Response() route = app.router.add_get(\"/a\",", "id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data): \"\"\"", "directory. tmp_dir = tempfile.mkdtemp() def teardown(): # Delete the whole", "class MyView(web.View): async def get(self): return web.Response() async def post(self):", "neither a file nor a directory. Checks that if a", "None, id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of", "correct HTTP statuses are returned depending if we directory index", "async def test_add_view(aiohttp_client): app = web.Application() class MyView(web.View): async def", "test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests accessing a resource with \"\"\"", "aiohttp_client(app) # Request the root of the static directory. 
r", "static file server when server does not have permissions to", "Tests accessing non-existing resource Try to access a non-exiting resource", "web.Response() app.router.add_view(\"/a\", MyView) client = await aiohttp_client(app) r = await", "= web.Application() class MyView(web.View): async def get(self): return web.Response() async", "pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a", "await client.post(\"/a\") assert r.status == 200 await r.release() r =", "same name and path of the last added resource doesn't", "content of static file server when server does not have", "b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True,", "root of the static directory. r = await client.get('/my_dir') assert", "\"\"\" app = web.Application() # Register global static route: app.router.add_static('/',", "web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock() special.is_dir.return_value =", "resource in app.router.resources(): for route in resource: assert route.handler.__doc__ ==", "def get(self): return web.Response() async def post(self): return web.Response() app.router.add_view(\"/a\",", "assert r.status == 405 await r.release() async def test_decorate_view(aiohttp_client): routes", "server. 
Try to list a folder content of static file", "client.get('/my_dir') assert r.status == 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\"", "special.is_dir.return_value = False special.is_file.return_value = False path = MagicMock() path.joinpath.side_effect", "200 await r.release() r = await client.head('/a') assert r.status ==", "aiohttp_client): \"\"\" Tests the access to a looped symlink, which", "def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a looped", "to a folder of static file server. Try to list", "not. \"\"\" # Put a file inside tmp_dir_path: my_file_path =", "of static file server. Try to list a folder content", "def resolve(self, request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client =", "returned. \"\"\" app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special", "route.name is None assert route.resource is None assert \"<SystemRoute 201:", "b'hello')) client = await aiohttp_client(app) r = await client.get('/') data", "handler, name=\"a\") app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources()) == 1 def", "with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in app.router.resources(): for route", "if data: assert r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_ = (await", "\".\") assert not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app = web.Application()", "open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path,", "Tests the unauthorized access to a folder of static file", "200 await r.release() r = await client.head('/b') assert r.status ==", "b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data):", "def get(self): return 
web.Response() async def post(self): return web.Response() app.router.add_routes([", "app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock()", "p: (special if p == 'special' else path) path.resolve.return_value =", "and path of the last added resource doesn't create a", "'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application() # Register global static", "when server does not have permissions to do so for", "name=\"a\") assert len(app.router.resources()) == 1 def test_resource_raw_match(): app = web.Application()", "web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in", "last added resource doesn't create a new resource. \"\"\" app", "web.Application() class MyView(web.View): async def get(self): return web.Response() async def", "handler, allow_head=False, name='b') client = await aiohttp_client(app) r = await", "r = await client.get(prefix) assert r.status == status if data:", "the root of the static directory. 
r = await client.get(prefix)", "of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def", "\"\"\" data = 'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path)", "== 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'),", "request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client", "app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\") assert", "world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')", "mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock() path.joinpath.return_value = path path.resolve.return_value", "r = await client.head('/b') assert r.status == 405 await r.release()", "post(self): return web.Response() app = web.Application() app.router.add_routes(routes) client = await", "is None assert route.resource is None assert \"<SystemRoute 201: test>\"", "a folder content of static file server when server does", "(await r.read()) assert read_ == data async def test_follow_symlink(tmp_dir_path, aiohttp_client):", "test>\" == repr(route) assert 201 == route.status assert 'test' ==", "app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler, name=\"b\")", "client.get('/') data = (await r.read()) assert data == b'hello' def", "async def test_handler_metadata_persistence(): \"\"\" Tests accessing metadata of a handler", "test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a looped symlink,", "status if data: assert 
r.headers['Content-Type'] == \"text/html; charset=utf-8\" read_ =", "200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n'", "await aiohttp_client(app) r = await client.get(\"/a\") assert r.status == 200", "filename) app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app) r = await", "my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path,", "directory. r = await client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert", "'test file.txt', 'test text'), ('test dir name', 'test dir file", "client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b',", "app = web.Application() async def async_handler(request): \"\"\"Doc\"\"\" return web.Response() def", "web.Application() # Register global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client", "path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value = path", "201: test>\" == repr(route) assert 201 == route.status assert 'test'", "== route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self,", "web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client =", "data == b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError):", "folder of static file server. 
Try to list a folder", "url = os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client = await", "@pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async", "r = await client.get('/my_symlink/my_file_in_dir') assert r.status == 200 assert (await", "app = web.Application(router=MyRouter()) client = await aiohttp_client(app) resp = await", "\"\"\" Give a path for a temporary directory The directory", "\"\"\" # Put a file inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path,", "a resource with \"\"\" app = web.Application() async def handler(request):", "r = await client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [", "allow_head=False, name='b') client = await aiohttp_client(app) r = await client.get('/a')", "if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path, 'w')", "app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client = await", "test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix, data): \"\"\" Tests the operation", "== 200 assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('',", "r.status == 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the", "Checks operation of static files with spaces \"\"\" my_dir_path =", "show_index=True) client = await aiohttp_client(app) # Request the root of", "\"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application() with", "tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403, '/', None, id=\"index_forbidden\"), pytest.param(True, 200,", "of the static directory. 
r = await client.get(prefix) assert r.status", "resource = app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client):", "== 405 await r.release() async def test_web_view(aiohttp_client): app = web.Application()", "= os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename)", "client = await aiohttp_client(app) r = await client.get(request_url) assert r.status", "await r.release() async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\") class", "web.Application() async def handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET',", "with open(my_file_path, 'w') as fw: fw.write(data) app = web.Application() url", "async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in app.router.resources(): for", "r = await client.put(\"/a\") assert r.status == 405 await r.release()", "def test_web_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self):", "\"\"\" Tests the access to a resource that is neither", "resource and make sure that 404 HTTP status returned. \"\"\"", "assert r.status == 200 assert (await r.text()) == data async", "client = await aiohttp_client(app) # Request the root of the", "test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing non-existing resource Try to access", "the access to a symlink, in static folder \"\"\" data", "await client.get('/my_symlink') assert r.status == 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client):", "Tests accessing a resource with \"\"\" app = web.Application() async", "unauthorized access to a folder of static file server. 
Try", "as path_constructor: special = MagicMock() special.is_dir.return_value = False special.is_file.return_value =", "r = await client.get(\"/a\") assert r.status == 200 await r.release()", "200 await r.release() r = await client.put(\"/a\") assert r.status ==", "aiohttp_client): \"\"\" Tests the access to a resource that is", "path special.resolve.return_value = special path_constructor.return_value = path # Register global", "MagicMock() path.joinpath.return_value = path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = PermissionError()", "client.get('/special') assert r.status == 404 async def test_partialy_applied_handler(aiohttp_client): app =", "app = web.Application() async def handler(request): return web.Response() app.router.add_get(registered_path, handler)", "r = await client.get('/b') assert r.status == 200 await r.release()", "assert r.status == 200 await r.release() r = await client.head('/a')", "socket) then 404 HTTP status returned. \"\"\" app = web.Application()", "assert r.status == 405 await r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}',", "get(self): return web.Response() async def post(self): return web.Response() app.router.add_routes([ web.view(\"/a\",", "of the static directory. r = await client.get('/non_existing_resource') assert r.status", "app.router.add_routes([ web.view(\"/a\", MyView) ]) client = await aiohttp_client(app) r =", "which could not be resolved. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink')", "named pipe or UNIX domain socket) then 404 HTTP status", "test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self, request): raise web.HTTPPreconditionFailed() app", "= MagicMock() special.is_dir.return_value = False special.is_file.return_value = False path =", "a looped symlink, which could not be resolved. \"\"\" my_dir_path", "nor a directory. 
Checks that if a special resource is", "r.read()) assert data == b'hello' def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test'))", "if p == 'special' else path) path.resolve.return_value = path special.resolve.return_value", "for route in resource: assert route.handler.__doc__ == 'Doc' async def", "a new resource. \"\"\" app = web.Application() async def handler(request):", "a handler after registering it on the app router. \"\"\"", "200 assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test", "os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w')", "def post(self): return web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ]) client =", "Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await", "]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): \"\"\" Checks", "file inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w')", "= await client.get(request_url) assert r.status == 200 async def test_handler_metadata_persistence():", "async def async_handler(request): \"\"\"Doc\"\"\" return web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return", "== route.status assert 'test' == route.reason async def test_412_is_returned(aiohttp_client): class", "make sure that correct HTTP statuses are returned depending if", "404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to", "404 HTTP status returned. 
\"\"\" app = web.Application() with mock.patch('pathlib.Path.__new__')", "href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index", "special = MagicMock() special.is_dir.return_value = False special.is_file.return_value = False path", "of the static directory. r = await client.get('/my_dir') assert r.status", "= await client.get(url) assert r.status == 200 assert (await r.text())", "async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the unauthorized access to", "= path # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True)", "temporary directory The directory is destroyed at the end of", "static directory. r = await client.get(prefix) assert r.status == status", "UNIX domain socket) then 404 HTTP status returned. \"\"\" app", "(special if p == 'special' else path) path.resolve.return_value = path", "@pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test text'), ('test dir name',", "at the end of the test. \"\"\" # Temporary directory.", "@pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ]) def test_reuse_last_added_resource(path): \"\"\" Test that", "def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async def", "the folder. 
\"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app =", "web.Response() route = app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\") route =", "r = await client.get(url) assert r.status == 200 assert (await", "path.resolve.return_value = path special.resolve.return_value = special path_constructor.return_value = path #", "= web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async def get(self): return web.Response()", "async def test_web_view(aiohttp_client): app = web.Application() class MyView(web.View): async def", "'test dir file .txt', 'test text file folder') ]) async", "fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app =", "path = MagicMock() path.joinpath.return_value = path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect", "test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name is", "is destroyed at the end of the test. \"\"\" #", "access to a resource that is neither a file nor", "with pytest.raises(RuntimeError): route.url_for() assert route.name is None assert route.resource is", "from unittest.mock import MagicMock import pytest from aiohttp import abc,", "= path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value = path # Register", "have permissions to do so for the folder. \"\"\" my_dir_path", "status returned. \"\"\" app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor:", "# Temporary directory. tmp_dir = tempfile.mkdtemp() def teardown(): # Delete", "= path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value =", "returned. 
\"\"\" app = web.Application() # Register global static route:", "\"\"\" Tests the access to a symlink, in static folder", "return web.Response() route = app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\") route", "'/{a}', ]) def test_reuse_last_added_resource(path): \"\"\" Test that adding a route", "client = await aiohttp_client(app) r = await client.get('/a') assert r.status", "= os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as fw: fw.write('hello') my_dir_path", "The directory is destroyed at the end of the test.", "r.status == 405 await r.release() async def test_decorate_view(aiohttp_client): routes =", "def post(self): return web.Response() app.router.add_view(\"/a\", MyView) client = await aiohttp_client(app)", "domain socket) then 404 HTTP status returned. \"\"\" app =", "assert r.status == 200 assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data',", "a path for a temporary directory The directory is destroyed", "(await r.read()) assert data == b'hello' def test_system_route(): route =", "create a new resource. 
\"\"\" app = web.Application() async def", "def test_reuse_last_added_resource(path): \"\"\" Test that adding a route with the", "resource Try to access a non-exiting resource and make sure", "os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application() # Register global", "/.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")])", "= await aiohttp_client(app) r = await client.get('/') data = (await", "resolve(self, request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client = await", "await client.get('/') assert resp.status == 412 async def test_allow_head(aiohttp_client): \"\"\"", "= web.Application() async def handler(request): return web.Response() app.router.add_get(path, handler, name=\"a\")", "= await aiohttp_client(app) r = await client.get(request_url) assert r.status ==", "route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name is None", "MyView) client = await aiohttp_client(app) r = await client.get(\"/a\") assert", "web.view(\"/a\", MyView) ]) client = await aiohttp_client(app) r = await", "app = web.Application() async def handler(request): return web.Response() app.router.add_get(path, handler,", "directory. 
r = await client.get('/special') assert r.status == 404 async", "= web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True)", "aiohttp_client(app) r = await client.get(url) assert r.status == 200 assert", "async def handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/',", "sync_handler) for resource in app.router.resources(): for route in resource: assert", "id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n'", "b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async", "]) async def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests accessing a", "== 200 async def test_handler_metadata_persistence(): \"\"\" Tests accessing metadata of", "the root of the static directory. r = await client.get('/special')", "r.status == 200 await r.release() r = await client.head('/a') assert", "aiohttp_client(app) r = await client.get(\"/a\") assert r.status == 200 await", "r.status == 404 @pytest.mark.parametrize('registered_path,request_url', [ ('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b',", "resource that is neither a file nor a directory. 
Checks", "= web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, show_index=True)", "global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await aiohttp_client(app)", "Register global static route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await", "= app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\")", "r = await client.get('/') data = (await r.read()) assert data", "r.status == 200 assert (await r.text()) == data async def", "'/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client, registered_path,", "does not have permissions to do so for the folder.", "r.status == 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the", "raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client = await aiohttp_client(app) resp", "with open(my_file_path, 'w') as fw: fw.write('world') app = web.Application() #", "import tempfile from unittest import mock from unittest.mock import MagicMock", "assert resp.status == 412 async def test_allow_head(aiohttp_client): \"\"\" Test allow_head", "aiohttp_client(app) r = await client.get('/a') assert r.status == 200 await", "follow_symlinks=True) client = await aiohttp_client(app) # Request the root of", "open(my_file_path, 'w') as fw: fw.write(data) app = web.Application() url =", "\"\"\" # Temporary directory. tmp_dir = tempfile.mkdtemp() def teardown(): #", "assert not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app = web.Application() class", "return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client =", "so for the folder. 
\"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path)", "await r.release() r = await client.get('/b') assert r.status == 200", "the last added resource doesn't create a new resource. \"\"\"", "show_index=show_index) client = await aiohttp_client(app) # Request the root of", "route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the", "read_ = (await r.read()) assert read_ == data async def", "Tests the access to a resource that is neither a", "assert route.resource is None assert \"<SystemRoute 201: test>\" == repr(route)", "pytest.raises(RuntimeError): route.url_for() assert route.name is None assert route.resource is None", "client.get(url) assert r.status == 200 assert (await r.text()) == data", "== 404 async def test_partialy_applied_handler(aiohttp_client): app = web.Application() async def", "routes. \"\"\" app = web.Application() async def handler(_): return web.Response()", "403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to", "my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application() #", "app.router.add_get('/b', handler, allow_head=False, name='b') client = await aiohttp_client(app) r =", "filename, data): \"\"\" Checks operation of static files with spaces", "MagicMock() special.is_dir.return_value = False special.is_file.return_value = False path = MagicMock()", "await r.release() r = await client.put(\"/a\") assert r.status == 405", "return web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ]) client = await aiohttp_client(app)", "r.release() r = await client.head('/a') assert r.status == 200 await", "HTTP status returned. 
\"\"\" app = web.Application() with mock.patch('pathlib.Path.__new__') as", "file server when server does not have permissions to do", "mock from unittest.mock import MagicMock import pytest from aiohttp import", "the static directory. r = await client.get(prefix) assert r.status ==", "charset=utf-8\" read_ = (await r.read()) assert read_ == data async", "('test dir name', 'test dir file .txt', 'test text file", "test_reuse_last_added_resource(path): \"\"\" Test that adding a route with the same", "client = await aiohttp_client(app) r = await client.get(\"/a\") assert r.status", "client.put(\"/a\") assert r.status == 405 await r.release() async def test_decorate_view(aiohttp_client):", "== 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests the access", "returned depending if we directory index should be shown or", "Checks that if a special resource is accessed (f.e. named", "handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client = await aiohttp_client(app)", "def test_system_route(): route = SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name", "statuses are returned depending if we directory index should be", "aiohttp_client(app) resp = await client.get('/') assert resp.status == 412 async", "router. \"\"\" app = web.Application() async def async_handler(request): \"\"\"Doc\"\"\" return", "directory The directory is destroyed at the end of the", "assert (await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt',", "app = web.Application() async def handler(_): return web.Response() app.router.add_get('/a', handler,", "file server. 
Try to access the root of static file", "403, '/', None, id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index of /.</title>\\n'", "the access to a resource that is neither a file", "r.status == 200 await r.release() r = await client.head('/b') assert", "\"\"\"Doc\"\"\" return web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler)", "405 await r.release() async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\")", "dir_name, filename, data): \"\"\" Checks operation of static files with", "my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write('world')", "assert r.status == 405 await r.release() async def test_web_view(aiohttp_client): app", "path_constructor: special = MagicMock() special.is_dir.return_value = False special.is_file.return_value = False", "functools.partial(handler, b'hello')) client = await aiohttp_client(app) r = await client.get('/')", "def tmp_dir_path(request): \"\"\" Give a path for a temporary directory", "directory. r = await client.get('/my_dir') assert r.status == 403 async", "= web.Application() app.router.add_routes(routes) client = await aiohttp_client(app) r = await", "async def post(self): return web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ]) client", "of static file server. Try to access the root of", "destroyed at the end of the test. \"\"\" # Temporary", "r.release() r = await client.put(\"/a\") assert r.status == 405 await", "test_partialy_applied_handler(aiohttp_client): app = web.Application() async def handler(data, request): return web.Response(body=data)", "import pytest from aiohttp import abc, web from aiohttp.web_urldispatcher import", "be resolved. 
\"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app", "== 200 assert (await r.text()) == data async def test_access_non_existing_resource(tmp_dir_path,", "Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\",", "should be shown or not. \"\"\" # Put a file", "on the app router. \"\"\" app = web.Application() async def", "on routes. \"\"\" app = web.Application() async def handler(_): return", "the operation of static file server. Try to access the", "app = web.Application() # Register global static route: app.router.add_static(prefix, tmp_dir_path,", "def test_partialy_applied_handler(aiohttp_client): app = web.Application() async def handler(data, request): return", "route.url_for() assert route.name is None assert route.resource is None assert", "Test allow_head on routes. \"\"\" app = web.Application() async def", "name=\"a\") app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources()) == 1 def test_resource_raw_match():", "the end of the test. \"\"\" # Temporary directory. tmp_dir", "of the test. \"\"\" # Temporary directory. 
tmp_dir = tempfile.mkdtemp()", "return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403, '/', None, id=\"index_forbidden\"), pytest.param(True,", "def test_add_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self):", "post(self): return web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ]) client = await", "special.is_file.return_value = False path = MagicMock() path.joinpath.side_effect = lambda p:", "web.Application() async def handler(request): return web.Response() route = app.router.add_get(\"/a\", handler,", "fw.write(data) my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application()", "route = app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\",", "app = web.Application() class MyView(web.View): async def get(self): return web.Response()", "resolved. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app =", "\"\"\" Tests the access to a looped symlink, which could", "assert r.status == 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests", "aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give a path", "r = await client.get('/a') assert r.status == 200 await r.release()", "= os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write('world') app", "adding a route with the same name and path of", "await r.release() async def test_web_view(aiohttp_client): app = web.Application() class MyView(web.View):", "Give a path for a temporary directory The directory is", "== repr(route) assert 201 == route.status assert 'test' == route.reason", "after registering it on the app router. 
\"\"\" app =", "async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View): async", "handler(request): return web.Response() app.router.add_get(registered_path, handler) client = await aiohttp_client(app) r", "assert 201 == route.status assert 'test' == route.reason async def", "= await aiohttp_client(app) r = await client.get('/a') assert r.status ==", "path of the last added resource doesn't create a new", "client.post(\"/a\") assert r.status == 200 await r.release() r = await", "Try to access the root of static file server, and", "os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor:", "os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as fw: fw.write(data) app =", "return web.Response() app = web.Application() app.router.add_routes(routes) client = await aiohttp_client(app)", "= web.Application() async def async_handler(request): \"\"\"Doc\"\"\" return web.Response() def sync_handler(request):", "\"\"\" Tests accessing non-existing resource Try to access a non-exiting", "def async_handler(request): \"\"\"Doc\"\"\" return web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return web.Response()", "('/a:b', '/a:b'), ('/a@b', '/a@b'), ('/a:b', '/a%3Ab'), ]) async def test_url_escaping(aiohttp_client,", "sure that 404 HTTP status returned. \"\"\" app = web.Application()", "list a folder content of static file server when server", "client = await aiohttp_client(app) resp = await client.get('/') assert resp.status", "app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources()) == 1", "os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw:", "server. 
Try to access the root of static file server,", "resource: assert route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\"", "@routes.view(\"/a\") class MyView(web.View): async def get(self): return web.Response() async def", "async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): \"\"\" Checks operation", "def test_url_escaping(aiohttp_client, registered_path, request_url): \"\"\" Tests accessing a resource with", "\"\"\" app = web.Application() async def handler(_): return web.Response() app.router.add_get('/a',", "async_handler(request): \"\"\"Doc\"\"\" return web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async',", "return web.Response() async def post(self): return web.Response() app.router.add_view(\"/a\", MyView) client", "name='a') app.router.add_get('/b', handler, allow_head=False, name='b') client = await aiohttp_client(app) r", "assert read_ == data async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests", ".txt', 'test text file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client,", "'/', functools.partial(handler, b'hello')) client = await aiohttp_client(app) r = await", "static folder \"\"\" data = 'hello world' my_dir_path = os.path.join(tmp_dir_path,", "depending if we directory index should be shown or not.", "web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock() path.joinpath.return_value =", "def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a resource", "handler, name=\"a\") assert len(app.router.resources()) == 1 def test_resource_raw_match(): app =", "a directory. Checks that if a special resource is accessed", "static directory. 
r = await client.get('/my_symlink') assert r.status == 404", "def test_resource_raw_match(): app = web.Application() async def handler(request): return web.Response()", "'/static', b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a", "web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client", "end of the test. \"\"\" # Temporary directory. tmp_dir =", "client = await aiohttp_client(app) r = await client.get('/') data =", "app.router.resources(): for route in resource: assert route.handler.__doc__ == 'Doc' async", "access the root of static file server, and make sure", "metadata of a handler after registering it on the app", "not be resolved. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path)", "200 await r.release() r = await client.post(\"/a\") assert r.status ==", "201 == route.status assert 'test' == route.reason async def test_412_is_returned(aiohttp_client):", "test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the unauthorized access to a folder", "web.Response() app = web.Application() app.router.add_routes(routes) client = await aiohttp_client(app) r", "permissions to do so for the folder. \"\"\" my_dir_path =", "async def test_allow_head(aiohttp_client): \"\"\" Test allow_head on routes. 
\"\"\" app", "test_resource_raw_match(): app = web.Application() async def handler(request): return web.Response() route", "== 405 await r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ]) def", "of the last added resource doesn't create a new resource.", "== 405 await r.release() async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef()", "import functools import os import shutil import tempfile from unittest", "static file server, and make sure that correct HTTP statuses", "'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the unauthorized access", "a route with the same name and path of the", "if we directory index should be shown or not. \"\"\"", "r.status == 200 await r.release() r = await client.get('/b') assert", "= await client.get('/') data = (await r.read()) assert data ==", "of static file server when server does not have permissions", "the test. \"\"\" # Temporary directory. tmp_dir = tempfile.mkdtemp() def", "'/a', '/{a}', ]) def test_reuse_last_added_resource(path): \"\"\" Test that adding a", "async def get(self): return web.Response() async def post(self): return web.Response()", "= await client.get('/my_symlink') assert r.status == 404 async def test_access_special_resource(tmp_dir_path,", "\"\"\" app = web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special =", "fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir')", "static files with spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name) if", "a symlink, in static folder \"\"\" data = 'hello world'", "accessing a resource with \"\"\" app = web.Application() async def", "app.router.add_post(path, handler, name=\"a\") assert len(app.router.resources()) == 1 def test_resource_raw_match(): app", "path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = 
PermissionError() path_constructor.return_value = path #", "= web.Application() async def handler(_): return web.Response() app.router.add_get('/a', handler, name='a')", "return web.Response() async def post(self): return web.Response() app.router.add_routes([ web.view(\"/a\", MyView)", "200 async def test_handler_metadata_persistence(): \"\"\" Tests accessing metadata of a", "assert \"<SystemRoute 201: test>\" == repr(route) assert 201 == route.status", "client.get('/my_symlink') assert r.status == 404 async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\"", "assert 'test' == route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async", "dir_name: os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as", "directory. r = await client.get('/my_symlink') assert r.status == 404 async", "symlink, in static folder \"\"\" data = 'hello world' my_dir_path", "async def handler(request): return web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler,", "'w') as fw: fw.write(data) app = web.Application() url = os.path.join('/',", "== 200 await r.release() r = await client.post(\"/a\") assert r.status", "in resource: assert route.handler.__doc__ == 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client):", "b'<html>\\n<head>\\n<title>Index of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n'", "r = await client.head('/a') assert r.status == 200 await r.release()", "show_index, status, prefix, data): \"\"\" Tests the operation of static", "test_allow_head(aiohttp_client): \"\"\" Test allow_head on routes. \"\"\" app = web.Application()", "def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a symlink,", "Request the root of the static directory. 
r = await", "r = await client.get('/my_symlink') assert r.status == 404 async def", "fw.write(data) app = web.Application() url = os.path.join('/', dir_name, filename) app.router.add_static('/',", "405 await r.release() async def test_web_view(aiohttp_client): app = web.Application() class", "tmp_dir_path, show_index=show_index) client = await aiohttp_client(app) # Request the root", "my_symlink_path = os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application() #", "test_handler_metadata_persistence(): \"\"\" Tests accessing metadata of a handler after registering", "404 HTTP status returned. \"\"\" app = web.Application() # Register", "web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client = await", "= app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\") async def test_add_view(aiohttp_client): app", "= web.Application(router=MyRouter()) client = await aiohttp_client(app) resp = await client.get('/')", "app.router.add_route('GET', '/', functools.partial(handler, b'hello')) client = await aiohttp_client(app) r =", "request_url): \"\"\" Tests accessing a resource with \"\"\" app =", "== \"text/html; charset=utf-8\" read_ = (await r.read()) assert read_ ==", "app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app) r = await client.get(url)", "pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in app.router.resources(): for route in", "r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test text'),", "r.status == 404 async def test_partialy_applied_handler(aiohttp_client): app = web.Application() async", "of static file server, and make sure that correct HTTP", "import shutil import tempfile from unittest import mock from unittest.mock", "async def handler(request): return web.Response() route = 
app.router.add_get(\"/a\", handler, name=\"a\")", "for the folder. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app", "False path = MagicMock() path.joinpath.side_effect = lambda p: (special if", "the same name and path of the last added resource", "registered_path, request_url): \"\"\" Tests accessing a resource with \"\"\" app", "the root of static file server, and make sure that", "shown or not. \"\"\" # Put a file inside tmp_dir_path:", "path = MagicMock() path.joinpath.side_effect = lambda p: (special if p", "async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self, request): raise", "\"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path =", "root of the static directory. r = await client.get('/special') assert", "r.release() r = await client.post(\"/a\") assert r.status == 200 await", "await client.get(request_url) assert r.status == 200 async def test_handler_metadata_persistence(): \"\"\"", "that if a special resource is accessed (f.e. named pipe", "that adding a route with the same name and path", "[ ('', 'test file.txt', 'test text'), ('test dir name', 'test", "directory. Checks that if a special resource is accessed (f.e.", "def handler(request): return web.Response() app.router.add_get(registered_path, handler) client = await aiohttp_client(app)", "200 assert (await r.text()) == data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client):", "aiohttp_client, show_index, status, prefix, data): \"\"\" Tests the operation of", "the root of the static directory. r = await client.get('/my_symlink')", "(await r.text()) == data @pytest.mark.parametrize('dir_name,filename,data', [ ('', 'test file.txt', 'test", "async def resolve(self, request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client", "the static directory. 
r = await client.get('/my_dir') assert r.status ==", "r.status == 405 await r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ])", "the root of the static directory. r = await client.get('/my_symlink/my_file_in_dir')", "/.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static',", "with spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path)", "async def handler(request): return web.Response() app.router.add_get(registered_path, handler) client = await", "for a temporary directory The directory is destroyed at the", "operation of static files with spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path,", "= web.Application() url = os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client", "abc, web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\"", "SystemRoute(web.HTTPCreated(reason='test')) with pytest.raises(RuntimeError): route.url_for() assert route.name is None assert route.resource", "client.get(\"/a\") assert r.status == 200 await r.release() r = await", "path_constructor.return_value = path # Register global static route: app.router.add_static('/', tmp_dir_path,", "client = await aiohttp_client(app) r = await client.get(url) assert r.status", "the static directory. 
r = await client.get('/special') assert r.status ==", "async def test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a", "app.router.add_view(\"/a\", MyView) client = await aiohttp_client(app) r = await client.get(\"/a\")", "server does not have permissions to do so for the", "\"\"\" Test that adding a route with the same name", "def sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync',", "it on the app router. \"\"\" app = web.Application() async", "is accessed (f.e. named pipe or UNIX domain socket) then", "not have permissions to do so for the folder. \"\"\"", "assert r.status == 403 async def test_access_symlink_loop(tmp_dir_path, aiohttp_client): \"\"\" Tests", "data): \"\"\" Tests the operation of static file server. Try", "handler(request): return web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path, handler, name=\"a\") assert", "spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name) if dir_name: os.mkdir(my_dir_path) my_file_path", "await r.release() r = await client.head('/b') assert r.status == 405", "route: app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await aiohttp_client(app) # Request", "static directory. 
r = await client.get('/my_dir') assert r.status == 403", "text file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename,", "web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give", "a file inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path,", "os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, filename) with open(my_file_path, 'w') as fw:", "assert (await r.text()) == data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\"", "client.get(prefix) assert r.status == status if data: assert r.headers['Content-Type'] ==", "folder content of static file server when server does not", "aiohttp_client(app) r = await client.get(request_url) assert r.status == 200 async", "= await client.put(\"/a\") assert r.status == 405 await r.release() async", "await aiohttp_client(app) r = await client.get(url) assert r.status == 200", "await r.release() @pytest.mark.parametrize(\"path\", [ '/a', '/{a}', ]) def test_reuse_last_added_resource(path): \"\"\"", "/.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"),", "aiohttp_client): \"\"\" Tests the unauthorized access to a folder of", "in app.router.resources(): for route in resource: assert route.handler.__doc__ == 'Doc'", "= os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__') as", "r.status == 405 await r.release() async def test_web_view(aiohttp_client): app =", "app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request the", "1 def test_resource_raw_match(): app = web.Application() async def handler(request): return", "data): \"\"\" Checks operation of 
static files with spaces \"\"\"", "= special path_constructor.return_value = path # Register global static route:", "static directory. r = await client.get('/my_symlink/my_file_in_dir') assert r.status == 200", "'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path = os.path.join(tmp_dir_path,", "request): raise web.HTTPPreconditionFailed() app = web.Application(router=MyRouter()) client = await aiohttp_client(app)", "class MyRouter(abc.AbstractRouter): async def resolve(self, request): raise web.HTTPPreconditionFailed() app =", "async def handler(_): return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler,", "root of the static directory. r = await client.get('/my_symlink') assert", "new resource. \"\"\" app = web.Application() async def handler(request): return", "= app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler,", "Tests accessing metadata of a handler after registering it on", "r.release() async def test_decorate_view(aiohttp_client): routes = web.RouteTableDef() @routes.view(\"/a\") class MyView(web.View):", "return web.Response() app.router.add_view(\"/a\", MyView) client = await aiohttp_client(app) r =", "r = await client.get(request_url) assert r.status == 200 async def", "r.status == 200 async def test_handler_metadata_persistence(): \"\"\" Tests accessing metadata", "file .txt', 'test text file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path,", "web.Response() def sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning):", "\"\"\" Tests the unauthorized access to a folder of static", "handler, name=\"a\") assert route.resource.raw_match(\"/a\") route = app.router.add_get(\"/{b}\", handler, name=\"b\") assert", "@pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 
403, '/', None, id=\"index_forbidden\"), pytest.param(True, 200, '/',", "= await aiohttp_client(app) resp = await client.get('/') assert resp.status ==", "global static route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app)", "import abc, web from aiohttp.web_urldispatcher import SystemRoute @pytest.fixture(scope='function') def tmp_dir_path(request):", "= web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock() path.joinpath.return_value", "= await aiohttp_client(app) r = await client.get(url) assert r.status ==", "client.head('/b') assert r.status == 405 await r.release() @pytest.mark.parametrize(\"path\", [ '/a',", "web.Response() async def post(self): return web.Response() app.router.add_routes([ web.view(\"/a\", MyView) ])", "= os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_symlink_path) app = web.Application() # Register", "functools import os import shutil import tempfile from unittest import", "r.release() r = await client.head('/b') assert r.status == 405 await", "directory is destroyed at the end of the test. 
\"\"\"", "to list a folder content of static file server when", "def post(self): return web.Response() app = web.Application() app.router.add_routes(routes) client =", "'special' else path) path.resolve.return_value = path special.resolve.return_value = special path_constructor.return_value", "def handler(data, request): return web.Response(body=data) with pytest.warns(DeprecationWarning): app.router.add_route('GET', '/', functools.partial(handler,", "handler(_): return web.Response() app.router.add_get('/a', handler, name='a') app.router.add_get('/b', handler, allow_head=False, name='b')", "whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403,", "of /.</title>\\n' b'</head>\\n<body>\\n<h1>Index of /.</h1>\\n<ul>\\n' b'<li><a href=\"/static/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>',", "inside tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as", "of static files with spaces \"\"\" my_dir_path = os.path.join(tmp_dir_path, dir_name)", "special resource is accessed (f.e. named pipe or UNIX domain", "tmp_dir_path, follow_symlinks=True) client = await aiohttp_client(app) # Request the root", "(f.e. 
named pipe or UNIX domain socket) then 404 HTTP", "test_access_special_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to a resource that", "handler(request): return web.Response() route = app.router.add_get(\"/a\", handler, name=\"a\") assert route.resource.raw_match(\"/a\")", "await client.get('/a') assert r.status == 200 await r.release() r =", "name=\"b\") assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\", \".\") assert not resource.raw_match(\"/static\")", "Register global static route: app.router.add_static('/', tmp_dir_path, follow_symlinks=True) client = await", "test_add_view(aiohttp_client): app = web.Application() class MyView(web.View): async def get(self): return", "\"show_index,status,prefix,data\", [pytest.param(False, 403, '/', None, id=\"index_forbidden\"), pytest.param(True, 200, '/', b'<html>\\n<head>\\n<title>Index", "await aiohttp_client(app) r = await client.get(request_url) assert r.status == 200", "with mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock() special.is_dir.return_value = False", "pytest from aiohttp import abc, web from aiohttp.web_urldispatcher import SystemRoute", "web.Application() async def handler(request): return web.Response() app.router.add_get(path, handler, name=\"a\") app.router.add_post(path,", "tmp_dir_path: my_file_path = os.path.join(tmp_dir_path, 'my_file') with open(my_file_path, 'w') as fw:", "of /.</h1>\\n<ul>\\n' b'<li><a href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200,", "as fw: fw.write('hello') my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path =", "non-existing resource Try to access a non-exiting resource and make", "sync_handler(request): \"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): 
app.router.add_get('/sync', sync_handler)", "== 'Doc' async def test_unauthorized_folder_access(tmp_dir_path, aiohttp_client): \"\"\" Tests the unauthorized", "folder. \"\"\" my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application()", "path.joinpath.return_value = path path.resolve.return_value = path path.iterdir.return_value.__iter__.side_effect = PermissionError() path_constructor.return_value", "repr(route) assert 201 == route.status assert 'test' == route.reason async", "\"\"\" Tests the operation of static file server. Try to", "non-exiting resource and make sure that 404 HTTP status returned.", "'test text file folder') ]) async def test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name,", "a temporary directory The directory is destroyed at the end", "@pytest.fixture(scope='function') def tmp_dir_path(request): \"\"\" Give a path for a temporary", "the static directory. r = await client.get('/my_symlink/my_file_in_dir') assert r.status ==", "access to a folder of static file server. Try to", "route = app.router.add_get(\"/{b}\", handler, name=\"b\") assert route.resource.raw_match(\"/{b}\") resource = app.router.add_static(\"/static\",", "shutil import tempfile from unittest import mock from unittest.mock import", "= os.path.join(my_dir_path, 'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write(data) my_symlink_path", "href=\"/my_dir\">my_dir/</a></li>\\n' b'<li><a href=\"/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_root\"), pytest.param(True, 200, '/static', b'<html>\\n<head>\\n<title>Index of", "name', 'test dir file .txt', 'test text file folder') ])", "== 'special' else path) path.resolve.return_value = path special.resolve.return_value = special", "HTTP status returned. \"\"\" app = web.Application() # Register global", "web.Response() async def post(self): return web.Response() app = web.Application() app.router.add_routes(routes)", "directory. 
r = await client.get('/non_existing_resource') assert r.status == 404 @pytest.mark.parametrize('registered_path,request_url',", "path_constructor: path = MagicMock() path.joinpath.return_value = path path.resolve.return_value = path", "\"\"\" Tests accessing a resource with \"\"\" app = web.Application()", "lambda p: (special if p == 'special' else path) path.resolve.return_value", "special.resolve.return_value = special path_constructor.return_value = path # Register global static", "directory index should be shown or not. \"\"\" # Put", "Tests the access to a symlink, in static folder \"\"\"", "data async def test_follow_symlink(tmp_dir_path, aiohttp_client): \"\"\" Tests the access to", "href=\"/static/my_file\">my_file</a></li>\\n' b'</ul>\\n</body>\\n</html>', id=\"index_static\")]) async def test_access_root_of_static_handler(tmp_dir_path, aiohttp_client, show_index, status, prefix,", "the access to a looped symlink, which could not be", "app.router.add_static(prefix, tmp_dir_path, show_index=show_index) client = await aiohttp_client(app) # Request the", "assert r.status == 200 async def test_handler_metadata_persistence(): \"\"\" Tests accessing", "request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403, '/', None, id=\"index_forbidden\"),", "special path_constructor.return_value = path # Register global static route: app.router.add_static('/',", "'my_file_in_dir') with open(my_file_path, 'w') as fw: fw.write('world') app = web.Application()", "Test that adding a route with the same name and", "== 200 await r.release() r = await client.put(\"/a\") assert r.status", "a folder of static file server. Try to list a", "'test' == route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def", "registering it on the app router. 
\"\"\" app = web.Application()", "data = 'hello world' my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path", "with \"\"\" app = web.Application() async def handler(request): return web.Response()", "directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize( \"show_index,status,prefix,data\", [pytest.param(False, 403, '/',", "= os.path.join(tmp_dir_path, 'my_symlink') os.symlink(my_dir_path, my_dir_path) app = web.Application() # Register", "mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock() special.is_dir.return_value = False special.is_file.return_value", "as path_constructor: path = MagicMock() path.joinpath.return_value = path path.resolve.return_value =", "== data async def test_access_non_existing_resource(tmp_dir_path, aiohttp_client): \"\"\" Tests accessing non-existing", "my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) my_file_path = os.path.join(my_dir_path, 'my_file_in_dir') with", "route: app.router.add_static('/', tmp_dir_path, show_index=True) client = await aiohttp_client(app) # Request", "my_dir_path = os.path.join(tmp_dir_path, 'my_dir') os.mkdir(my_dir_path) app = web.Application() with mock.patch('pathlib.Path.__new__')", "a non-exiting resource and make sure that 404 HTTP status", "doesn't create a new resource. \"\"\" app = web.Application() async", "with mock.patch('pathlib.Path.__new__') as path_constructor: path = MagicMock() path.joinpath.return_value = path", "# Delete the whole directory: shutil.rmtree(tmp_dir) request.addfinalizer(teardown) return tmp_dir @pytest.mark.parametrize(", "operation of static file server. 
Try to access the root", "await r.release() r = await client.head('/a') assert r.status == 200", "client.head('/a') assert r.status == 200 await r.release() r = await", "r.status == 200 await r.release() r = await client.put(\"/a\") assert", "r = await client.get('/my_dir') assert r.status == 403 async def", "the static directory. r = await client.get('/non_existing_resource') assert r.status ==", "= web.Application() with mock.patch('pathlib.Path.__new__') as path_constructor: special = MagicMock() special.is_dir.return_value", "route.reason async def test_412_is_returned(aiohttp_client): class MyRouter(abc.AbstractRouter): async def resolve(self, request):", "app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): app.router.add_get('/sync', sync_handler) for resource in app.router.resources():", "of a handler after registering it on the app router.", "post(self): return web.Response() app.router.add_view(\"/a\", MyView) client = await aiohttp_client(app) r", "r.status == status if data: assert r.headers['Content-Type'] == \"text/html; charset=utf-8\"", "test_access_to_the_file_with_spaces(tmp_dir_path, aiohttp_client, dir_name, filename, data): \"\"\" Checks operation of static", "server when server does not have permissions to do so", "assert r.status == 200 await r.release() r = await client.put(\"/a\")", "app = web.Application() # Register global static route: app.router.add_static('/', tmp_dir_path,", "os.path.join('/', dir_name, filename) app.router.add_static('/', tmp_dir_path) client = await aiohttp_client(app) r", "os.symlink(my_dir_path, my_dir_path) app = web.Application() # Register global static route:", "await client.get('/b') assert r.status == 200 await r.release() r =", "= web.Application() async def handler(request): return web.Response() app.router.add_get(registered_path, handler) client", "\"\"\"Doc\"\"\" return web.Response() app.router.add_get('/async', async_handler) with pytest.warns(DeprecationWarning): 
app.router.add_get('/sync', sync_handler) for" ]
[ "len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind]", "= metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return", "precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation = F.relu,", "[i1 for i1 in mps if self.pred_label[i1] == c2] cost[i][j]", "self.mean return self.mean, self.logstd ,sampled_z @staticmethod def decode(z): A_pred =", "activation def forward(self, inputs, adj): x = inputs x =", "range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 *", "torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0):", "np import torch.nn as nn import scipy.sparse as sp import", "= 'diag') for _ in epoch_bar: opti.zero_grad() _,_, z =", "* adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return", "= X.detach().numpy() centers = centers.detach().numpy() if X.size == 0: q", "= GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) # GMM", "unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if", "c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c =", "average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict,", "= self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if", "def predict(self, z): pi = self.pi log_sigma2_c = self.log_sigma2_c 
mu_c", "= z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted =", "self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean return self.mean,", "weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig =", "for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]]", "self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy())", "= [ind for ind, elm in enumerate(self.pred_label) if elm ==", "kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training", "yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 =", "f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro')", "= GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) self.gcn_logstddev =", "weight_tensor, norm, epochs, lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr)", "precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro,", "input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight =", "axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices =", "activation = lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation", "by Munkres algorithm m = Munkres() cost = cost.__neg__().tolist() indexes", "torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, z): pi = self.pi 
log_sigma2_c", "& <NAME> (<EMAIL>) # @Paper : Rethinking Graph Autoencoder Models", "= self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE,", "x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor)", "= weight_tensor) Loss = Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det", "gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag') for", "loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data", "new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict,", "f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh =", "self.mu_c det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy()", "weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) <", "'diag') for _ in epoch_bar: opti.zero_grad() _,_, z = self.encode(features,", "= 1 for j in adj_k: if np.isin(j, unconf_indices) and", "in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k]", "super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation def", "for _ in epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj)", "def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log,", "yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita, axis=1) def", "as np import torch.nn 
as nn import scipy.sparse as sp", "<NAME> (<EMAIL>) & <NAME> (<EMAIL>) # @Paper : Rethinking Graph", "def evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label)", "= torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features, adj, x_,", "emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted", "if self.pred_label[i1] == c2] cost[i][j] = len(mps_d) # match two", "= nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y, weight_tensor,", "recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label)", "unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j]", "numclass1 != numclass2: print('Class Not equal, Error!!!!') return 0 cost", "(yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 =", "adj[k, idx[i]] = 1 for j in adj_k: if np.isin(j,", "torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2", "/ (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha))", "epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters , covariance_type", "precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def 
__init__(self, input_dim, output_dim, activation", "= 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2)", "= q ** ((alpha + 1.0) / 2.0) q =", "= list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2 =", "= q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1 and", "KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1, Loss+Loss1 def", "class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons']", "z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind", "unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices,", "def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1,", "dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr,", "def q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy()", "return adj, adj_label, weight_tensor def train(self, adj_norm, adj, features, y,", "log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c det = 1e-2 yita_c", "= kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters =", "mps if self.pred_label[i1] == c2] cost[i][j] = len(mps_d) # match", "0.96 beta2 = beta2 * 0.98 if epoch % 50", "sp import torch.nn.functional as F from tqdm import tqdm from", "= m.compute(cost) # get the match results new_predict = np.zeros(len(self.pred_label))", "= self.log_sigma2_c mu_c = self.mu_c det = 1e-2 yita_c =", "len(l2) if numclass1 != numclass2: print('Class Not equal, Error!!!!') return", 
"torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor =", "= metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro,", "self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c,", "with label==c2 in the pred_label list ai = [ind for", "-*- # @Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>) #", "for i1, e1 in enumerate(self.true_label) if e1 == c1] for", "axis=2) / alpha)) q = q ** ((alpha + 1.0)", "the match results new_predict = np.zeros(len(self.pred_label)) for i, c in", "label in l2: c2 = l2[indexes[i][1]] # ai is the", "unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices,", "epochs, lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar =", "_ in epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj) x_", "self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def", "equal, Error!!!!') return 0 cost = np.zeros((numclass1, numclass2), dtype=int) for", "weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_)", "metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro =", "= self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det =", "(np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j] = 0", "def clusteringAcc(self): # best mapping between true_label and predict label", "return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): 
unconf_indices = []", "training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)", "class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label = true_label self.pred_label", "elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters):", "torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 *", "generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices = [] conf_indices = []", "< len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf", "if numclass1 != numclass2: print('Class Not equal, Error!!!!') return 0", "= generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch == 0: adj,", "recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a')", "-= KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred", "l2 = list(set(self.pred_label)) numclass2 = len(l2) if numclass1 != numclass2:", "conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log", "acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward()", "match two clustering results by Munkres algorithm m = Munkres()", "fh.close() return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class", "as nn import scipy.sparse as sp import torch.nn.functional as F", "in epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj) x_ =", "z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf", "sampled_z 
= gaussian_noise * torch.exp(self.logstd) + self.mean return self.mean, self.logstd", "1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) /", "in enumerate(l1): # correponding label in l2: c2 = l2[indexes[i][1]]", "= torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) /", "torch.exp(self.logstd) + self.mean return self.mean, self.logstd ,sampled_z @staticmethod def decode(z):", "metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro,", "in enumerate(self.pred_label) if elm == c2] new_predict[ai] = c acc", "csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = []", "q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i", "self.true_label = true_label self.pred_label = predict_label def clusteringAcc(self): # best", ", covariance_type = 'diag') for _ in epoch_bar: opti.zero_grad() _,_,", "y, weight_tensor, norm, epochs, lr, save_path, dataset): opti = Adam(self.parameters(),", "Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c /", "opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_ = self.decode(emb)", "predict_label def clusteringAcc(self): # best mapping between true_label and predict", "loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm,", "f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation =", "= self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward()", "new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label,", "activation = F.relu, 
**kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim)", "x = inputs x = torch.mm(x,self.weight) x = torch.mm(adj, x)", "* F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss = Loss *", "== 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0]", "confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i in", ": epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted]", "[] conf_indices = [] q = q_mat(emb, centers_emb, alpha=1.0) confidence1", "parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c", ": MIT License import torch import numpy as np import", "Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost) # get the", "self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters", "1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted]", "f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro')", "adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] = 1", "Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors=", "= torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor", "0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil()", "#!/usr/bin/env python # -*- coding: utf-8 -*- # @Authors :", "self.num_neurons, self.embedding_size, 
activation = lambda x:x) # GMM training parameters", "= self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil() idx =", "as F from tqdm import tqdm from torch.optim import Adam", "# best mapping between true_label and predict label l1 =", "self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self, inputs,", "self.logstd ,sampled_z @staticmethod def decode(z): A_pred = torch.sigmoid(torch.matmul(z,z.t())) return A_pred", "nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True)", "= lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation =", "results new_predict = np.zeros(len(self.pred_label)) for i, c in enumerate(l1): #", "# VGAE training parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean", "# GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c =", "beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti", "save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(),", "1) - centers), axis=2) / alpha)) q = q **", "a = np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i] =", "evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc,", "[] previous_conflicted = [] epoch_stable = 0 for epoch in", "== c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro", "adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset): opti =", 
"x:x) # GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c", "nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers =", "q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers),", "sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask", "emb_conf = emb[previous_conflicted] if epoch_stable >= 15: epoch_stable = 0", "/ alpha)) q = q ** ((alpha + 1.0) /", "indexes = m.compute(cost) # get the match results new_predict =", "metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro =", "recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\\r\\n') fh.flush() fh.close()", "label l1 = list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label))", "metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro =", "return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self):", "= 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor)", "Loss1 -= KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf):", "tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters , covariance_type = 'diag')", "adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi = self.pi", "import StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors", "= cost.__neg__().tolist() indexes = m.compute(cost) # get the match results", "recall_micro def evaluationClusterModelFromLabel(self): nmi = 
metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label,", "def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr,", "random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim + output_dim)) initial", "self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd", "= cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c", "for j in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k))", "pi = self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det", "dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089)", "y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel()", "previous_unconflicted = unconflicted_ind else : epoch_stable += 1 z_mu =", "adj): x = inputs x = torch.mm(x,self.weight) x = torch.mm(adj,", "nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices, conf_indices):", "c2 = l2[indexes[i][1]] # ai is the index with label==c2", "f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro,", "adj = adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label =", "# @Paper : Rethinking Graph Autoencoder Models for Attributed Graph", "parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons,", "(y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] = 1 for j", "+ output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial)", "self.nClusters , covariance_type = 'diag') for _ in epoch_bar: 
opti.zero_grad()", "= GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation", "conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def", "return unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label", "self.mu_c, beta1, beta2) if epoch == 0: adj, adj_label, weight_tensor", "def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices = [] conf_indices =", "adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask =", "return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers", "metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro =", "average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def", "conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable += 1 z_mu", "0 and epoch <= 200 : adj, adj_label, weight_tensor =", "z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred)", "self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z =", "0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar", "= np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def __init__(self,", "algorithm m = Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost)", "(confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i)", "def 
__init__(self, input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs)", "/ np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2):", "c def predict(self, z): pi = self.pi log_sigma2_c = self.log_sigma2_c", "(z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return", "and predict label l1 = list(set(self.true_label)) numclass1 = len(l1) l2", "j] = 0 adj = adj.tocsr() adj_label = adj +", "def __init__(self, true_label, predict_label): self.true_label = true_label self.pred_label = predict_label", "unconf_indices = [] conf_indices = [] q = q_mat(emb, centers_emb,", "list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2)", "= yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self, x_features, adj): hidden", "unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch ==", "label==c2 in the pred_label list ai = [ind for ind,", "'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f'", "torch.nn.functional as F from tqdm import tqdm from torch.optim import", "for j, c2 in enumerate(l2): mps_d = [i1 for i1", "adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd)", "idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k =", "self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self,", "= self.encode(features, adj_norm) x_ = self.decode(emb) 
unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb,", "np.zeros((numclass1, numclass2), dtype=int) for i, c1 in enumerate(l1): mps =", "== 0 and epoch <= 200 : adj, adj_label, weight_tensor", "enumerate(l1): mps = [i1 for i1, e1 in enumerate(self.true_label) if", "previous_conflicted = [] epoch_stable = 0 for epoch in epoch_bar:", "self.embedding_size, activation = lambda x:x) # GMM training parameters self.pi", "= open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f,", "nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self,", "/ adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor def", "import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted", "self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)]", "self.log_sigma2_c mu_c = self.mu_c det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det", "torch.nn as nn import scipy.sparse as sp import torch.nn.functional as", "q_mat(X, centers, alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy() if", "norm, z_mu, z_sigma2_log, emb, L=1): pi = self.pi mu_c =", "len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf =", "for i, c in enumerate(l1): # correponding label in l2:", "adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro,", "self.activation = activation def forward(self, inputs, adj): x = inputs", "adj_label, weight_tensor def train(self, adj_norm, adj, features, y, norm, epochs,", "+= 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf =", 
"for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c", "emb, unconf_indices, conf_indices): k = 0 y_pred = self.predict(emb) emb_unconf", "norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss = Loss", "self.mean = self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise =", "import torch import numpy as np import torch.nn as nn", "= torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self,", "numclass2), dtype=int) for i, c1 in enumerate(l1): mps = [i1", "yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1))", "if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]):", "clustering results by Munkres algorithm m = Munkres() cost =", "0 beta1 = beta1 * 0.96 beta2 = beta2 *", "precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s):", ": <NAME> (<EMAIL>) & <NAME> (<EMAIL>) # @Paper : Rethinking", "= metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro =", "recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro,", "list ai = [ind for ind, elm in enumerate(self.pred_label) if", "KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1, Loss+Loss1", "= emb[conflicted_ind] 
previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind else :", "munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 /", "f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc,", "GaussianMixture(n_components = self.nClusters , covariance_type = 'diag') for _ in", "beta2) if epoch == 0: adj, adj_label, weight_tensor = self.update_graph(adj,", "fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f,", "= metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro", "(y_pred[k] != y_pred[j]): adj[k, j] = 0 adj = adj.tocsr()", "confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1)", "= [] previous_conflicted = [] epoch_stable = 0 for epoch", "opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data =", "self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro,", "from torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors", "kwargs['nClusters'] # VGAE training parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons)", "z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted]", "pi = self.pi log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c det", "* torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, z): pi = self.pi", "self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj)", "= 
self.log_sigma2_c det = 1e-2 Loss = 1e-2 * norm", "= pos_weight_orig return adj, adj_label, weight_tensor def train(self, adj_norm, adj,", "x_ = self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2)", "fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' %", "ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features", "conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return", "beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti =", "= centers.detach().numpy() if X.size == 0: q = np.array([]) else:", "= q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],))", "x) outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self,", "norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data =", "self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro,", "ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb,", "def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim + output_dim))", "else: q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1) -", "opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1))", "save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm", "c acc = 
metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro')", ">= 15: epoch_stable = 0 beta1 = beta1 * 0.96", "-0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, z): pi =", "GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) self.gcn_logstddev = GraphConvSparse(", "= adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T),", "nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step()", "mapping between true_label and predict label l1 = list(set(self.true_label)) numclass1", "precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro,", "class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs):", "= emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >= 15: epoch_stable", "axis=1) def encode(self, x_features, adj): hidden = self.base_gcn(x_features, adj) self.mean", "= self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise", "+ self.mean return self.mean, self.logstd ,sampled_z @staticmethod def decode(z): A_pred", "y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi, adjscore,", "def encode(self, x_features, adj): hidden = self.base_gcn(x_features, adj) self.mean =", "- centers), axis=2) / alpha)) q = q ** ((alpha", "Clustering # @License : MIT License import torch import numpy", "opti.zero_grad() _,_, z = self.encode(features, adj) x_ = self.decode(z) loss", "= metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') 
precision_micro", "gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def", "GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation =", "print('Class Not equal, Error!!!!') return 0 cost = np.zeros((numclass1, numclass2),", "F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation =", "average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict,", "from tqdm import tqdm from torch.optim import Adam from sklearn.mixture", "e1 == c1] for j, c2 in enumerate(l2): mps_d =", "l2[indexes[i][1]] # ai is the index with label==c2 in the", "true_label, predict_label): self.true_label = true_label self.pred_label = predict_label def clusteringAcc(self):", "Munkres algorithm m = Munkres() cost = cost.__neg__().tolist() indexes =", "cost[i][j] = len(mps_d) # match two clustering results by Munkres", "0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1))", "__init__(self, true_label, predict_label): self.true_label = true_label self.pred_label = predict_label def", "adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f,", "z = self.encode(features, adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1),", "between true_label and predict label l1 = list(set(self.true_label)) numclass1 =", "+ 
(np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha)) q =", "open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f,", "nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y, weight_tensor, norm,", "activation = lambda x:x) # GMM training parameters self.pi =", "0 adj = adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label", "pos_weight_orig return adj, adj_label, weight_tensor def train(self, adj_norm, adj, features,", "* torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1", "f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi =", "metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro,", "get the match results new_predict = np.zeros(len(self.pred_label)) for i, c", "= np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]]", "import torch.nn as nn import scipy.sparse as sp import torch.nn.functional", "opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components", "adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor def train(self,", "new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label,", "generate_centers(self, emb_unconf): y_pred = 
self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy())", "== c2] cost[i][j] = len(mps_d) # match two clustering results", "<NAME> (<EMAIL>) # @Paper : Rethinking Graph Autoencoder Models for", "precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f,", "if epoch == 0: adj, adj_label, weight_tensor = self.update_graph(adj, y,", "= beta1 * 0.96 beta2 = beta2 * 0.98 if", "emb = self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind =", "(input_dim + output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return", "= norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data", "KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1", "adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_)", "mu_c = self.mu_c det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita", "= tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters , covariance_type =", "torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors import", "self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features,", "= self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log,", "k = 0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj", "predict(self, z): pi = self.pi 
log_sigma2_c = self.log_sigma2_c mu_c =", "adj_label, weight = weight_tensor) Loss = Loss * features.size(0) yita_c", "z): pi = self.pi log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c", "self.logstd = self.mean def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor,", "0 for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb =", "F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss = Loss * features.size(0)", "* torch.exp(self.logstd) + self.mean return self.mean, self.logstd ,sampled_z @staticmethod def", "fh.flush() fh.close() return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro", "yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self, x_features, adj): hidden =", "c in enumerate(l1): # correponding label in l2: c2 =", "return self.mean, self.logstd ,sampled_z @staticmethod def decode(z): A_pred = torch.sigmoid(torch.matmul(z,z.t()))", "= random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self, inputs, adj):", "= [i1 for i1, e1 in enumerate(self.true_label) if e1 ==", "Attributed Graph Clustering # @License : MIT License import torch", "q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) >", "and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j] =", "def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def", "self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y,", "= metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro", "new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label,", "(<EMAIL>) & <NAME> (<EMAIL>) # @Paper : Rethinking Graph 
Autoencoder", "= kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training parameters self.base_gcn", "/ (input_dim + output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range", "inputs x = torch.mm(x,self.weight) x = torch.mm(adj, x) outputs =", "epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm)", "weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum())", "0.98 if epoch % 50 == 0 and epoch <=", "self.mean, self.logstd ,sampled_z @staticmethod def decode(z): A_pred = torch.sigmoid(torch.matmul(z,z.t())) return", "precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\\r\\n') fh.flush()", "features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1", "enumerate(self.pred_label) if elm == c2] new_predict[ai] = c acc =", "y_pred[j]): adj[k, j] = 0 adj = adj.tocsr() adj_label =", "epoch_stable >= 15: epoch_stable = 0 beta1 = beta1 *", "adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi", "features, y, norm, epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path", "list(set(self.pred_label)) numclass2 = len(l2) if numclass1 != numclass2: print('Class Not", "if X.size == 0: q = np.array([]) else: q =", "j, c2 in enumerate(l2): mps_d = [i1 for i1 in", "new_predict = np.zeros(len(self.pred_label)) for i, c in enumerate(l1): # correponding", "self.embedding_size, activation = lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size,", "torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features,", "adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor", 
"epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi,", "self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__()", "lr=lr, weight_decay = 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import", "= emb[previous_conflicted] if epoch_stable >= 15: epoch_stable = 0 beta1", "f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim,", "= 0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj =", "from munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0", "self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step()", "self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) #", "50 == 0 and epoch <= 200 : adj, adj_label,", "emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y, y_pred) acc,", "float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig", "weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb)", "cost = np.zeros((numclass1, numclass2), dtype=int) for i, c1 in enumerate(l1):", "unconf_indices, conf_indices): k = 0 y_pred = self.predict(emb) emb_unconf =", "self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise *", "[ind for ind, elm in enumerate(self.pred_label) if elm == c2]", "algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj,", "% (acc, f1_macro, 
precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore))", "L=1): pi = self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c", "** ((alpha + 1.0) / 2.0) q = np.transpose(np.transpose(q) /", "self.mean def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm, z_mu,", "x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x)", "hidden = self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj) self.logstd =", "conf_indices): k = 0 y_pred = self.predict(emb) emb_unconf = emb[unconf_indices]", "dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm =", "nn import scipy.sparse as sp import torch.nn.functional as F from", "nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features,", "q = q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 =", "recall_micro, nmi, adjscore) ) fh.write('\\r\\n') fh.flush() fh.close() return acc, nmi,", "encode(self, x_features, adj): hidden = self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden,", "l2: c2 = l2[indexes[i][1]] # ai is the index with", "for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features,", "15: epoch_stable = 0 beta1 = beta1 * 0.96 beta2", "self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c =", "z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >=", "= np.zeros((numclass1, numclass2), dtype=int) for i, c1 in enumerate(l1): mps", "= np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb,", 
"norm, epochs, lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar", "Not equal, Error!!!!') return 0 cost = np.zeros((numclass1, numclass2), dtype=int)", "metrics from munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range =", "adj, labels, emb, unconf_indices, conf_indices): k = 0 y_pred =", "tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = [] epoch_stable = 0", "self.pred_label[i1] == c2] cost[i][j] = len(mps_d) # match two clustering", "1.0) / 2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return", "NearestNeighbors from sklearn import metrics from munkres import Munkres def", "self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size)", "from preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn", "training parameters self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse(", "self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] #", "self.pi mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2", "emb, L=1): pi = self.pi mu_c = self.mu_c log_sigma2_c =", "mps_d = [i1 for i1 in mps if self.pred_label[i1] ==", "gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self,", "x_features, adj): hidden = self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj)", "= torch.mm(x,self.weight) x = torch.mm(adj, x) outputs = self.activation(x) return", "gaussian_noise * torch.exp(self.logstd) + self.mean return self.mean, self.logstd ,sampled_z @staticmethod", "= len(mps_d) # match two clustering results by Munkres algorithm", "output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = 
random_uniform_init(input_dim,", "emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind", "recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro,", "return 0 cost = np.zeros((numclass1, numclass2), dtype=int) for i, c1", "train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2,", "beta1, beta2): unconf_indices = [] conf_indices = [] q =", "import Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim", "= l2[indexes[i][1]] # ai is the index with label==c2 in", "random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self, inputs, adj): x", "adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight =", "in the pred_label list ai = [ind for ind, elm", "f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def", "q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices = [] conf_indices", "= np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices", "/ 2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q", "weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi = self.pi mu_c", "y_pred = self.predict(emb) emb_unconf = emb[unconf_indices] adj = adj.tolil() idx", "emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_,", "StepLR from preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from", "scipy.sparse as sp import torch.nn.functional as F from tqdm import", "len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2) if numclass1 !=", "emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted 
= unconflicted_ind else", "y, norm, epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path +", "= Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c", "beta2 = beta2 * 0.98 if epoch % 50 ==", "torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self,", "= lambda x:x) # GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters,", "self.encode(features, adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight", "np.zeros(len(self.pred_label)) for i, c in enumerate(l1): # correponding label in", "= torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self,", "adj): hidden = self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj) self.logstd", "@License : MIT License import torch import numpy as np", "Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR from", "sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn import metrics from", "import Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR", "= self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f,", "GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) # GMM training", "MIT License import torch import numpy as np import torch.nn", "epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset +", "1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] -", "index with 
label==c2 in the pred_label list ai = [ind", "adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset):", "numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2) if", "for i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i],", "= metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro,", "lr_s = StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar =", "ai is the index with label==c2 in the pred_label list", "self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch", "predict_label): self.true_label = true_label self.pred_label = predict_label def clusteringAcc(self): #", "numclass2: print('Class Not equal, Error!!!!') return 0 cost = np.zeros((numclass1,", "in mps if self.pred_label[i1] == c2] cost[i][j] = len(mps_d) #", "outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons =", "Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn", "X = X.detach().numpy() centers = centers.detach().numpy() if X.size == 0:", "return c def predict(self, z): pi = self.pi log_sigma2_c =", "np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] != y_pred[j]): adj[k,", "= [] conf_indices = [] q = q_mat(emb, centers_emb, alpha=1.0)", "= self.mu_c det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita =", "epoch == 0: adj, adj_label, weight_tensor = self.update_graph(adj, y, emb,", "as sp import torch.nn.functional as F from tqdm import tqdm", "from torch.optim import Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler", "X.detach().numpy() centers = centers.detach().numpy() if X.size == 0: q =", "output_dim): 
init_range = np.sqrt(6.0 / (input_dim + output_dim)) initial =", "= Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components =", "numclass2 = len(l2) if numclass1 != numclass2: print('Class Not equal,", "k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and", "adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k,", "= len(l1) l2 = list(set(self.pred_label)) numclass2 = len(l2) if numclass1", "= adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) :", "z_mu, z_sigma2_log, emb, L=1): pi = self.pi mu_c = self.mu_c", "KL2 return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred =", "output_dim) self.activation = activation def forward(self, inputs, adj): x =", "elm in enumerate(self.pred_label) if elm == c2] new_predict[ai] = c", "q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i]", "import NearestNeighbors from sklearn import metrics from munkres import Munkres", "# get the match results new_predict = np.zeros(len(self.pred_label)) for i,", "torch.mm(adj, x) outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def", "torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean return", "y_pred = self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices", "return indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k", "= np.array([]) else: q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X,", "x = torch.mm(adj, x) outputs = self.activation(x) return outputs class", "= [i1 for i1 in mps if self.pred_label[i1] == c2]", "= self.base_gcn(x_features, adj) self.mean = self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden,", "VGAE training parameters 
self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean =", "i, c in enumerate(l1): # correponding label in l2: c2", "pred_label list ai = [ind for ind, elm in enumerate(self.pred_label)", "epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = [] epoch_stable", "z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind]", "norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm", ": adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)", "emb_unconf = emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for", "in l2: c2 = l2[indexes[i][1]] # ai is the index", "q = np.array([]) else: q = 1.0 / (1.0 +", "beta2 * 0.98 if epoch % 50 == 0 and", "= nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices,", "fh.write('\\r\\n') fh.flush() fh.close() return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro,", "cost.__neg__().tolist() indexes = m.compute(cost) # get the match results new_predict", "centers_emb, beta1, beta2): unconf_indices = [] conf_indices = [] q", "confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int)", "Munkres def random_uniform_init(input_dim, output_dim): init_range = np.sqrt(6.0 / (input_dim +", "= c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict,", "self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size']", "Rethinking Graph Autoencoder Models for Attributed Graph Clustering # @License", "self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, 
self.embedding_size, activation = lambda x:x)", "= nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters,", "if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]]", "epoch_stable = 0 beta1 = beta1 * 0.96 beta2 =", "weight = weight_tensor) Loss = Loss * features.size(0) yita_c =", "(1.0 + (np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha)) q", "in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5", "acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi", "precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro')", ": adj[k, idx[i]] = 1 for j in adj_k: if", "beta1 * 0.96 beta2 = beta2 * 0.98 if epoch", "weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss", "precision_macro, recall_macro, f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label,", "and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] = 1 for", "adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if", "ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi,", "= torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X, centers,", "== y_pred[idx[i]]) : adj[k, idx[i]] = 1 for j in", "= adj_label.to_dense().view(-1) == 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = 
float(adj.shape[0]", "% (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)", "X.size == 0: q = np.array([]) else: q = 1.0", "previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable +=", "= self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss =", "clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro =", "centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a =", "f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\\r\\n')", "c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro =", "torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features, adj, x_, adj_label,", "if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i]) > beta2:", "adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss, loss1,", "= weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data =", "adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy()))", "inputs, adj): x = inputs x = torch.mm(x,self.weight) x =", "c1] for j, c2 in enumerate(l2): mps_d = [i1 for", "= kwargs['nClusters'] # VGAE training parameters self.base_gcn = GraphConvSparse( self.num_features,", "= len(l2) if numclass1 != numclass2: print('Class Not equal, Error!!!!')", "self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) :", "acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f,", "self.log_sigma2_c det = 1e-2 Loss = 
1e-2 * norm *", "self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label,", "1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita, axis=1)", "f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[]", "= metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro", "torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -=", "= torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd) + self.mean", "adj) self.mean = self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise", "= 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita,", "nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f,", "== 0: q = np.array([]) else: q = 1.0 /", "= [] epoch_stable = 0 for epoch in epoch_bar: opti.zero_grad()", "adj_k)) and (y_pred[k] != y_pred[j]): adj[k, j] = 0 adj", "def train(self, adj_norm, adj, features, y, norm, epochs, lr, beta1,", "_, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels,", "x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1): pi =", 
"precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f,", "**kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size", "elm == c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label, new_predict)", "@Paper : Rethinking Graph Autoencoder Models for Attributed Graph Clustering", "tqdm from torch.optim import Adam from sklearn.mixture import GaussianMixture from", "coding: utf-8 -*- # @Authors : <NAME> (<EMAIL>) & <NAME>", "# @License : MIT License import torch import numpy as", "= [] q = q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1)", "ai = [ind for ind, elm in enumerate(self.pred_label) if elm", "= 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1 = KL1 KL2=", "c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, z):", "np.array([]) else: q = 1.0 / (1.0 + (np.sum(np.square(np.expand_dims(X, 1)", "i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if", "lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters ,", "- init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X =", "= predict_label def clusteringAcc(self): # best mapping between true_label and", "= tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted = [] epoch_stable =", "dtype=int) for i, c1 in enumerate(l1): mps = [i1 for", "Error!!!!') return 0 cost = np.zeros((numclass1, numclass2), dtype=int) for i,", "os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = [] previous_conflicted =", "and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) 
unconf_indices", "loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu", "> beta1 and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else:", "dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics():", "preprocessing import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn import", "/ (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+ (z_mu.unsqueeze(1)-mu_c.unsqueeze(0)).pow(2)/torch.exp(log_sigma2_c.unsqueeze(0)),2),1)) Loss1", "200 : adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind,", "c2 in enumerate(l2): mps_d = [i1 for i1 in mps", "weight_tensor def train(self, adj_norm, adj, features, y, norm, epochs, lr,", "beta1 = beta1 * 0.96 beta2 = beta2 * 0.98", "average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict,", "from sklearn import metrics from munkres import Munkres def random_uniform_init(input_dim,", "= -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c def predict(self, z): pi", "x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred", "init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X = X.detach().numpy()", "GraphConvSparse(nn.Module): def __init__(self, input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse,", "import torch.nn.functional as F from tqdm import tqdm from torch.optim", "self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, 
ADJ_RAND_SCORE=%f'", "= KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss, Loss1,", "adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in", "import metrics from munkres import Munkres def random_uniform_init(input_dim, output_dim): init_range", "= z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf =", "from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing", "((alpha + 1.0) / 2.0) q = np.transpose(np.transpose(q) / np.sum(q,", "in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_", "y, emb, unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm,", "f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f,", "unconflicted_ind, conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1),", "[] q = q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2", "acc = metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro", "beta1 and (confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i)", "dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label):", "q_mat(emb, centers_emb, alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a", "j in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and", "adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj,", "and (y_pred[k] != y_pred[j]): adj[k, j] = 0 adj =", "adj_label.to_dense().view(-1) == 1 
weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] *", "torch.ones(weight_mask.size(0)) pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum()", "pos_weight_orig = float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask]", "if epoch % 50 == 0 and epoch <= 200", "adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1,", "e1 in enumerate(self.true_label) if e1 == c1] for j, c2", "= adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices):", "not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]]) : adj[k, idx[i]] =", "np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label,", "det = 1e-2 Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1),", "self.embedding_size),requires_grad=True) def pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs,", "NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro,", "features, adj_label, y, weight_tensor, norm, epochs, lr, save_path, dataset): opti", "utf-8 -*- # @Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>)", "true_label and predict label l1 = list(set(self.true_label)) numclass1 = len(l1)", "Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight =", "+ dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay =", "conflicted_ind = generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch == 0:", "* 0.96 beta2 = beta2 * 0.98 if epoch %", "conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label = true_label", "return np.argmax(yita, axis=1) def encode(self, x_features, adj): hidden = self.base_gcn(x_features,", "emb_unconf): y_pred = self.predict(emb_unconf) nn = 
NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _,", "in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i])", "centers), axis=2) / alpha)) q = q ** ((alpha +", "np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i]", "update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k = 0 y_pred", "= kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE", "epoch <= 200 : adj, adj_label, weight_tensor = self.update_graph(adj, y,", "average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro, precision_macro,", "@Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>) # @Paper :", "* features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1))", "output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X, centers, alpha=1.0): X", "metrics.accuracy_score(self.true_label, new_predict) f1_macro = metrics.f1_score(self.true_label, new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label,", "new_predict, average='micro') return acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro", "f1_micro, precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore", "labels, emb, unconf_indices, conf_indices): k = 0 y_pred = self.predict(emb)", "% 50 == 0 and epoch <= 200 : adj,", "import GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple", "c1 in enumerate(l1): mps = [i1 for i1, e1 in", "torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor = 
torch.ones(weight_mask.size(0))", "centers = centers.detach().numpy() if X.size == 0: q = np.array([])", "Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9)", "best mapping between true_label and predict label l1 = list(set(self.true_label))", "norm, epochs, lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset", "alpha)) q = q ** ((alpha + 1.0) / 2.0)", "torch.mm(x,self.weight) x = torch.mm(adj, x) outputs = self.activation(x) return outputs", "and epoch <= 200 : adj, adj_label, weight_tensor = self.update_graph(adj,", "(acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore) )", "clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label = true_label self.pred_label =", "l1 = list(set(self.true_label)) numclass1 = len(l1) l2 = list(set(self.pred_label)) numclass2", "= nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def pretrain(self, adj,", "1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self,", "for Attributed Graph Clustering # @License : MIT License import", "enumerate(self.true_label) if e1 == c1] for j, c2 in enumerate(l2):", "precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt',", "# ai is the index with label==c2 in the pred_label", "yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+", "self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay", "z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = 
self.predict(emb) cm =", "return Loss, Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf)", "precision_micro, recall_micro, nmi, adjscore) ) fh.write('\\r\\n') fh.flush() fh.close() return acc,", "+ '/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s", "StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted", "= activation def forward(self, inputs, adj): x = inputs x", "generate_unconflicted_data_index(emb, self.mu_c, beta1, beta2) if epoch == 0: adj, adj_label,", "enumerate(l2): mps_d = [i1 for i1 in mps if self.pred_label[i1]", "self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda", "0: adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind)", "Graph Clustering # @License : MIT License import torch import", "weight_tensor) Loss = Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c", ": z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind]", "epoch_stable = 0 for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log,", "sklearn.neighbors import NearestNeighbors from sklearn import metrics from munkres import", "torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean", "unconf_indices, conf_indices class clustering_metrics(): def __init__(self, true_label, predict_label): self.true_label =", "Graph Autoencoder Models for Attributed Graph Clustering # @License :", "import sparse_to_tuple from sklearn.neighbors import NearestNeighbors from sklearn import metrics", "centers, alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy() if X.size", "!= numclass2: 
print('Class Not equal, Error!!!!') return 0 cost =", "self.pred_label = predict_label def clusteringAcc(self): # best mapping between true_label", "== c1] for j, c2 in enumerate(l2): mps_d = [i1", "= Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s = StepLR(opti, step_size=10,", "lr, save_path, dataset): opti = Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs))", "initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def q_mat(X,", "epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_ =", "len(mps_d) # match two clustering results by Munkres algorithm m", "= torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd =", "precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for", "nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return", "= 1e-2 Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label,", "kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters']", "weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data", "= torch.mm(adj, x) outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module):", "beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices =", "__init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features']", "np.sqrt(6.0 / (input_dim + output_dim)) initial = torch.rand(input_dim, 
output_dim)*2*init_range -", "alpha=1.0): X = X.detach().numpy() centers = centers.detach().numpy() if X.size ==", "recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f,", "= self.nClusters , covariance_type = 'diag') for _ in epoch_bar:", "self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda x:x) self.gcn_logstddev", "emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable", "= self.gcn_mean(hidden, adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0),", "= z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable", "new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc, f1_macro,", "= Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost) # get", "2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def", "mps = [i1 for i1, e1 in enumerate(self.true_label) if e1", "i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k))", "__init__(self, input_dim, output_dim, activation = F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight", "gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted = []", "adj_label = adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label =", "correponding label in l2: c2 = l2[indexes[i][1]] # ai is", "= np.sqrt(6.0 / (input_dim + output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range", "beta1, beta2) if epoch == 0: adj, adj_label, weight_tensor =", "1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss", "adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), 
torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1", "requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) self.log_sigma2_c = nn.Parameter(torch.randn(self.nClusters, self.embedding_size),requires_grad=True) def", "np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1, beta2): unconf_indices", "enumerate(l1): # correponding label in l2: c2 = l2[indexes[i][1]] #", "recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro')", "adjscore, f1_macro, precision_macro, f1_micro, precision_micro = cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step()", ": Rethinking Graph Autoencoder Models for Attributed Graph Clustering #", "emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >= 15:", "epoch_bar: opti.zero_grad() _,_, z = self.encode(features, adj) x_ = self.decode(z)", "from sklearn.neighbors import NearestNeighbors from sklearn import metrics from munkres", "unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind]", "= unconflicted_ind else : epoch_stable += 1 z_mu = z_mu[previous_unconflicted]", "emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k", "torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return c", "_,_, z = self.encode(features, adj) x_ = self.decode(z) loss =", "range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) >", "= 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import os, csv", "def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): 
G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1)", "y_pred[idx[i]]) : adj[k, idx[i]] = 1 for j in adj_k:", "= conflicted_ind previous_unconflicted = unconflicted_ind else : epoch_stable += 1", "# correponding label in l2: c2 = l2[indexes[i][1]] # ai", "return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1) return", "= self.pi log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c det =", "adj, features, y, norm, epochs, lr, beta1, beta2, save_path, dataset):", "= 0 adj = adj.tocsr() adj_label = adj + sp.eye(adj.shape[0])", "conflicted_ind) loss, loss1, elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor,", "adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k", "= emb[unconf_indices] adj = adj.tolil() idx = unconf_indices[self.generate_centers(emb_unconf)] for i,", "f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f,", "Loss1, Loss+Loss1 def generate_centers(self, emb_unconf): y_pred = self.predict(emb_unconf) nn =", "i, c1 in enumerate(l1): mps = [i1 for i1, e1", "1 for j in adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i],", "q = np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb,", "mu_c = self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss", "precision_micro, recall_micro = self.clusteringAcc() print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f,", "recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' % (acc, f1_macro, precision_macro,", "(confidence1[i] - confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices =", "import tqdm from torch.optim import 
Adam from sklearn.mixture import GaussianMixture", "= float(adj.shape[0] * adj.shape[0] - adj.sum()) / adj.sum() weight_tensor[weight_mask] =", "q = q ** ((alpha + 1.0) / 2.0) q", "axis=1) for i in range(q.shape[0]): confidence1[i] = q[i,a[i,-1]] confidence2[i] =", "= z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf =", "!= y_pred[j]): adj[k, j] = 0 adj = adj.tocsr() adj_label", "Loss = Loss * features.size(0) yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c =", "adj_k: if np.isin(j, unconf_indices) and (np.isin(idx[i], adj_k)) and (y_pred[k] !=", "print('ACC=%f, f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f, NMI=%f, ADJ_RAND_SCORE=%f' %", "in enumerate(l2): mps_d = [i1 for i1 in mps if", "alpha=1.0) confidence1 = q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q,", "recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f, f1_macro=%f, precision_macro=%f,", "cost = cost.__neg__().tolist() indexes = m.compute(cost) # get the match", "= self.predict(emb) cm = clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro,", "adj_norm, adj, features, y, norm, epochs, lr, beta1, beta2, save_path,", "= true_label self.pred_label = predict_label def clusteringAcc(self): # best mapping", "= unconf_indices[self.generate_centers(emb_unconf)] for i, k in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices", "# @Authors : <NAME> (<EMAIL>) & <NAME> (<EMAIL>) # @Paper", "NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def", "kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training parameters self.base_gcn =", "return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs): super(ReGMM_VGAE, self).__init__() self.num_neurons", 
"idx[i]] = 1 for j in adj_k: if np.isin(j, unconf_indices)", "gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z = gaussian_noise * torch.exp(self.logstd) +", "enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] ==", "= NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred]", "= q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i] - confidence2[i])", "sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) ==", "sklearn import metrics from munkres import Munkres def random_uniform_init(input_dim, output_dim):", "features, adj, x_, adj_label, weight_tensor, norm, z_mu, z_sigma2_log, emb, L=1):", "z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf = emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if", "cm = clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro,", "= self.predict(emb_unconf) nn = NearestNeighbors(n_neighbors= 1, algorithm='ball_tree').fit(emb_unconf.detach().numpy()) _, indices =", "det = 1e-2 yita_c = torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return", "lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons, self.embedding_size, activation = lambda", "np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int) return unconf_indices, conf_indices class", "GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing import sparse_to_tuple from", "m = Munkres() cost = cost.__neg__().tolist() indexes = m.compute(cost) #", "if e1 == c1] for j, c2 in enumerate(l2): mps_d", "recall_micro = self.clusteringAcc() print('ACC=%f, 
f1_macro=%f, precision_macro=%f, recall_macro=%f, f1_micro=%f, precision_micro=%f, recall_micro=%f,", "self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_)) self.logstd = self.mean def ELBO_Loss(self, features, adj,", "opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s = StepLR(opti,", "cm.evaluationClusterModelFromLabel() elbo_loss.backward() opti.step() lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in", "[] epoch_stable = 0 for epoch in epoch_bar: opti.zero_grad() z_mu,", "adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def __init__(self, input_dim,", "adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k] == y_pred[idx[i]])", "sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import StepLR from preprocessing import", "= q.max(1) confidence2 = np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for", "1e-2 Loss = 1e-2 * norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight", "predict label l1 = list(set(self.true_label)) numclass1 = len(l1) l2 =", "np.transpose(np.transpose(q) / np.sum(q, axis=1)) return q def generate_unconflicted_data_index(emb, centers_emb, beta1,", "else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices = np.asarray(conf_indices, dtype=int)", "indices = nn.kneighbors(self.mu_c.detach().numpy()) return indices[y_pred] def update_graph(self, adj, labels, emb,", "elbo_loss = self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu ,", "y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu", "z_mu = z_mu[unconflicted_ind] z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf", "-*- coding: utf-8 -*- # @Authors : <NAME> (<EMAIL>) &", "precision_micro, recall_micro def evaluationClusterModelFromLabel(self): nmi = 
metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore =", "f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh = open('recoder.txt', 'a') fh.write('ACC=%f,", "self.base_gcn = GraphConvSparse( self.num_features, self.num_neurons) self.gcn_mean = GraphConvSparse( self.num_neurons, self.embedding_size,", "= beta2 * 0.98 if epoch % 50 == 0", "log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss = 1e-2 *", "+ sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2]))", "self.nClusters = kwargs['nClusters'] # VGAE training parameters self.base_gcn = GraphConvSparse(", "nmi = metrics.normalized_mutual_info_score(self.true_label, self.pred_label) adjscore = metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro,", "adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]),", "i1, e1 in enumerate(self.true_label) if e1 == c1] for j,", "nmi, adjscore) ) fh.write('\\r\\n') fh.flush() fh.close() return acc, nmi, adjscore,", "import scipy.sparse as sp import torch.nn.functional as F from tqdm", "torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1) == 1 weight_tensor = torch.ones(weight_mask.size(0)) pos_weight_orig", "adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) loss,", "new_predict, average='macro') precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label,", "z_sigma2_log, emb, L=1): pi = self.pi mu_c = self.mu_c log_sigma2_c", "'/pretrain/model.pk')) opti = Adam(self.parameters(), lr=lr, weight_decay = 0.089) lr_s =", "acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module): def", 
"super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features = kwargs['num_features'] self.embedding_size =", "* norm * F.binary_cross_entropy(x_.view(-1), adj_label, weight = weight_tensor) Loss =", "self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss = 1e-2", "i1 in mps if self.pred_label[i1] == c2] cost[i][j] = len(mps_d)", "in enumerate(self.true_label) if e1 == c1] for j, c2 in", "= self.encode(features, adj) x_ = self.decode(z) loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1),", "== 0: adj, adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind,", "beta2): unconf_indices = [] conf_indices = [] q = q_mat(emb,", "pretrain(self, adj, features, adj_label, y, weight_tensor, norm, epochs, lr, save_path,", "if elm == c2] new_predict[ai] = c acc = metrics.accuracy_score(self.true_label,", "python # -*- coding: utf-8 -*- # @Authors : <NAME>", "(acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro, nmi, adjscore)) fh", "adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label) adj_label", "average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict,", "= self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind)", "emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted) < len(unconflicted_ind) : z_mu =", "emb[previous_conflicted] if epoch_stable >= 15: epoch_stable = 0 beta1 =", "- adj.sum()) / adj.sum() weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label,", "self.pi log_sigma2_c = self.log_sigma2_c mu_c = self.mu_c det = 1e-2", "loss = norm*F.binary_cross_entropy(x_.view(-1), adj_label.to_dense().view(-1), weight = weight_tensor) loss.backward() opti.step() gmm.fit_predict(z.detach().numpy())", "self.num_neurons, 
self.embedding_size, activation = lambda x:x) self.gcn_logstddev = GraphConvSparse( self.num_neurons,", "else : epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log =", "Adam(self.parameters(), lr=lr) epoch_bar = tqdm(range(epochs)) gmm = GaussianMixture(n_components = self.nClusters", "weight_decay = 0.089) lr_s = StepLR(opti, step_size=10, gamma=0.9) import os,", "return acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro class GraphConvSparse(nn.Module):", "if len(previous_unconflicted) < len(unconflicted_ind) : z_mu = z_mu[unconflicted_ind] z_sigma2_log =", "= F.relu, **kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation", "in enumerate(unconf_indices): adj_k = adj[k].tocsr().indices if not(np.isin(idx[i], adj_k)) and (y_pred[k]", "= sparse_to_tuple(adj_label) adj_label = torch.sparse.FloatTensor(torch.LongTensor(adj_label[0].T), torch.FloatTensor(adj_label[1]), torch.Size(adj_label[2])) weight_mask = adj_label.to_dense().view(-1)", "the pred_label list ai = [ind for ind, elm in", "self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation def forward(self,", "= np.zeros(len(self.pred_label)) for i, c in enumerate(l1): # correponding label", "f1_micro, precision_micro, recall_micro, nmi, adjscore) ) fh.write('\\r\\n') fh.flush() fh.close() return", "0: q = np.array([]) else: q = 1.0 / (1.0", ") fh.write('\\r\\n') fh.flush() fh.close() return acc, nmi, adjscore, f1_macro, precision_macro,", "forward(self, inputs, adj): x = inputs x = torch.mm(x,self.weight) x", "np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i in range(q.shape[0]): confidence1[i]", "outputs = self.activation(x) return outputs class ReGMM_VGAE(nn.Module): def __init__(self, **kwargs):", "emb[previous_unconflicted] emb_conf = emb[previous_conflicted] if epoch_stable >= 15: epoch_stable =", "the index with label==c2 in the 
pred_label list ai =", "torch.optim import Adam from sklearn.mixture import GaussianMixture from torch.optim.lr_scheduler import", "= 0 beta1 = beta1 * 0.96 beta2 = beta2", "weight_tensor[weight_mask] = pos_weight_orig return adj, adj_label, weight_tensor def train(self, adj_norm,", "conf_indices = [] q = q_mat(emb, centers_emb, alpha=1.0) confidence1 =", "Autoencoder Models for Attributed Graph Clustering # @License : MIT", "self.ELBO_Loss(features, adj_norm, x_, adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf)", "= StepLR(opti, step_size=10, gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs))", "is the index with label==c2 in the pred_label list ai", "F from tqdm import tqdm from torch.optim import Adam from", "= GaussianMixture(n_components = self.nClusters , covariance_type = 'diag') for _", "match results new_predict = np.zeros(len(self.pred_label)) for i, c in enumerate(l1):", "adj) self.logstd = self.gcn_logstddev(hidden, adj) gaussian_noise = torch.randn(x_features.size(0), self.embedding_size) sampled_z", "for ind, elm in enumerate(self.pred_label) if elm == c2] new_predict[ai]", "np.argmax(yita, axis=1) def encode(self, x_features, adj): hidden = self.base_gcn(x_features, adj)", "(<EMAIL>) # @Paper : Rethinking Graph Autoencoder Models for Attributed", "for i1 in mps if self.pred_label[i1] == c2] cost[i][j] =", "def update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k = 0", "z_sigma2_log = z_sigma2_log[unconflicted_ind] emb_unconf = emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted", "lr, beta1, beta2, save_path, dataset): self.load_state_dict(torch.load(save_path + dataset + '/pretrain/model.pk'))", "# -*- coding: utf-8 -*- # @Authors : <NAME> (<EMAIL>)", "adj[k, j] = 0 adj = adj.tocsr() adj_label = adj", "ind, elm in enumerate(self.pred_label) if elm == c2] new_predict[ai] =", "adjscore) ) fh.write('\\r\\n') fh.flush() fh.close() return acc, nmi, adjscore, f1_macro,", 
"covariance_type = 'diag') for _ in epoch_bar: opti.zero_grad() _,_, z", "Loss1 = KL1 KL2= torch.mean(torch.sum(yita_c*torch.log(pi.unsqueeze(0)/(yita_c)),1))+0.5*torch.mean(torch.sum(1+z_sigma2_log,1)) Loss1 -= KL2 return Loss,", "m.compute(cost) # get the match results new_predict = np.zeros(len(self.pred_label)) for", "= clustering_metrics(y, y_pred) acc, nmi, adjscore, f1_macro, precision_macro, f1_micro, precision_micro", "tqdm import tqdm from torch.optim import Adam from sklearn.mixture import", "epoch % 50 == 0 and epoch <= 200 :", "adj, adj_label, weight_tensor def train(self, adj_norm, adj, features, y, norm,", "torch import numpy as np import torch.nn as nn import", "two clustering results by Munkres algorithm m = Munkres() cost", "# match two clustering results by Munkres algorithm m =", "results by Munkres algorithm m = Munkres() cost = cost.__neg__().tolist()", "import numpy as np import torch.nn as nn import scipy.sparse", "= np.zeros((q.shape[0],)) a = np.argsort(q, axis=1) for i in range(q.shape[0]):", "q ** ((alpha + 1.0) / 2.0) q = np.transpose(np.transpose(q)", "in enumerate(l1): mps = [i1 for i1, e1 in enumerate(self.true_label)", "= yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5 * torch.mean(torch.sum(yita_c*torch.sum(log_sigma2_c.unsqueeze(0)+ torch.exp(z_sigma2_log.unsqueeze(1)-log_sigma2_c.unsqueeze(0))+", "(np.sum(np.square(np.expand_dims(X, 1) - centers), axis=2) / alpha)) q = q", "0 cost = np.zeros((numclass1, numclass2), dtype=int) for i, c1 in", "= inputs x = torch.mm(x,self.weight) x = torch.mm(adj, x) outputs", "= gaussian_noise * torch.exp(self.logstd) + self.mean return self.mean, self.logstd ,sampled_z", "true_label self.pred_label = predict_label def clusteringAcc(self): # best mapping between", "self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro = self.clusteringAcc()", "+ 1.0) / 2.0) q = np.transpose(np.transpose(q) / np.sum(q, axis=1))", "def __init__(self, **kwargs): 
super(ReGMM_VGAE, self).__init__() self.num_neurons = kwargs['num_neurons'] self.num_features =", "GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True) self.mu_c = nn.Parameter(torch.randn(self.nClusters,", "= adj.tocsr() adj_label = adj + sp.eye(adj.shape[0]) adj_label = sparse_to_tuple(adj_label)", "adj_label.to_dense().view(-1), weight_tensor, norm, z_mu , z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred =", "= emb[unconflicted_ind] emb_conf = emb[conflicted_ind] previous_conflicted = conflicted_ind previous_unconflicted =", "<= 200 : adj, adj_label, weight_tensor = self.update_graph(adj, y, emb,", "new_predict, average='macro') f1_micro = metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label,", "lr_s.step() def gaussian_pdfs_log(self,x,mus,log_sigma2s): G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return", "clusteringAcc(self): # best mapping between true_label and predict label l1", "numpy as np import torch.nn as nn import scipy.sparse as", "output_dim)) initial = torch.rand(input_dim, output_dim)*2*init_range - init_range return nn.Parameter(initial) def", "gmm.fit_predict(z.detach().numpy()) self.pi.data = torch.from_numpy(gmm.weights_) self.mu_c.data = torch.from_numpy(gmm.means_) self.log_sigma2_c.data = torch.log(torch.from_numpy(gmm.covariances_))", "indices[y_pred] def update_graph(self, adj, labels, emb, unconf_indices, conf_indices): k =", "x = torch.mm(x,self.weight) x = torch.mm(adj, x) outputs = self.activation(x)", "metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro = metrics.recall_score(self.true_label, new_predict, average='micro') return acc,", "Models for Attributed Graph Clustering # @License : MIT License", "> beta2: unconf_indices.append(i) else: 
conf_indices.append(i) unconf_indices = np.asarray(unconf_indices, dtype=int) conf_indices", "= self.mean def ELBO_Loss(self, features, adj, x_, adj_label, weight_tensor, norm,", "[i1 for i1, e1 in enumerate(self.true_label) if e1 == c1]", "= self.mu_c log_sigma2_c = self.log_sigma2_c det = 1e-2 Loss =", "centers.detach().numpy() if X.size == 0: q = np.array([]) else: q", "= 0 for epoch in epoch_bar: opti.zero_grad() z_mu, z_sigma2_log, emb", "G=[] for c in range(self.nClusters): G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2):", "confidence1[i] = q[i,a[i,-1]] confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1", "**kwargs): super(GraphConvSparse, self).__init__(**kwargs) self.weight = random_uniform_init(input_dim, output_dim) self.activation = activation", "epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log = z_sigma2_log[previous_unconflicted] emb_unconf", "confidence2[i] = q[i,a[i,-2]] if (confidence1[i]) > beta1 and (confidence1[i] -", "License import torch import numpy as np import torch.nn as", "metrics.adjusted_rand_score(self.true_label, self.pred_label) acc, f1_macro, precision_macro, recall_macro, f1_micro, precision_micro, recall_micro =", "lambda x:x) # GMM training parameters self.pi = nn.Parameter(torch.ones(self.nClusters)/self.nClusters, requires_grad=True)", "= list(set(self.pred_label)) numclass2 = len(l2) if numclass1 != numclass2: print('Class", "previous_unconflicted = [] previous_conflicted = [] epoch_stable = 0 for", "= metrics.f1_score(self.true_label, new_predict, average='micro') precision_micro = metrics.precision_score(self.true_label, new_predict, average='micro') recall_micro", "step_size=10, gamma=0.9) import os, csv epoch_bar = tqdm(range(epochs)) previous_unconflicted =", "adj_label, weight_tensor = self.update_graph(adj, y, emb, unconflicted_ind, conflicted_ind) if len(previous_unconflicted)", "init_range = 
np.sqrt(6.0 / (input_dim + output_dim)) initial = torch.rand(input_dim,", "self.embedding_size = kwargs['embedding_size'] self.nClusters = kwargs['nClusters'] # VGAE training parameters", "z_mu, z_sigma2_log, emb = self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind,", "* 0.98 if epoch % 50 == 0 and epoch", "torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(z,mu_c,log_sigma2_c))+det yita = yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self, x_features,", "if epoch_stable >= 15: epoch_stable = 0 beta1 = beta1", "def forward(self, inputs, adj): x = inputs x = torch.mm(x,self.weight)", "z_sigma2_log, emb = self.encode(features, adj_norm) x_ = self.decode(emb) unconflicted_ind, conflicted_ind", "= torch.exp(torch.log(pi.unsqueeze(0))+self.gaussian_pdfs_log(emb,mu_c,log_sigma2_c))+det yita_c = yita_c / (yita_c.sum(1).view(-1,1)) KL1 = 0.5", "G.append(self.gaussian_pdf_log(x,mus[c:c+1,:],log_sigma2s[c:c+1,:]).view(-1,1)) return torch.cat(G,1) def gaussian_pdf_log(self,x,mu,log_sigma2): c = -0.5 * torch.sum(np.log(np.pi*2)+log_sigma2+(x-mu).pow(2)/torch.exp(log_sigma2),1)", "c2] cost[i][j] = len(mps_d) # match two clustering results by", "- confidence2[i]) > beta2: unconf_indices.append(i) else: conf_indices.append(i) unconf_indices = np.asarray(unconf_indices,", "precision_macro = metrics.precision_score(self.true_label, new_predict, average='macro') recall_macro = metrics.recall_score(self.true_label, new_predict, average='macro')", "for i, c1 in enumerate(l1): mps = [i1 for i1,", "yita = yita_c.detach().numpy() return np.argmax(yita, axis=1) def encode(self, x_features, adj):", ", z_sigma2_log, emb_unconf) epoch_bar.write('Loss={:.4f}'.format(elbo_loss.detach().numpy())) y_pred = self.predict(emb) cm = clustering_metrics(y,", "unconflicted_ind else : epoch_stable += 1 z_mu = z_mu[previous_unconflicted] z_sigma2_log" ]
[ "PropertyObj = self.env['ir.property'] # Property Stock Journal value = self.env['account.journal'].search([('company_id',", "copyright and licensing details. from odoo import api, models, _", "'=', 'product.category'), ('relation', '=', 'account.journal')], limit=1) vals = { 'name':", "properties.write(vals) else: # create the property PropertyObj.create(vals) todo_list = [", "= self.env['ir.property'] # Property Stock Journal value = self.env['account.journal'].search([('company_id', '=',", "in todo_list: account = getattr(self, record) value = account and", "Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in", "'company_id': company.id, 'fields_id': field.id, 'value': value, } properties = PropertyObj.search([('name',", "'fields_id': field.id, 'value': value, } properties = PropertyObj.search([('name', '=', record),", "'product.category'), ('relation', '=', 'account.account')], limit=1) vals = { 'name': record,", "property PropertyObj.create(vals) todo_list = [ # Property Stock Accounts 'property_stock_account_input_categ_id',", "if value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'),", "'=', 'product.category'), ('relation', '=', 'account.account')], limit=1) vals = { 'name':", "'account.account')], limit=1) vals = { 'name': record, 'company_id': company.id, 'fields_id':", "field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=',", "properties: # the property exist: modify it properties.write(vals) else: #", "self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1)", "'value': 'account.journal,%s' % value.id, } properties = PropertyObj.search([('name', '=', 'property_stock_journal'),", "= { 'name': record, 
'company_id': company.id, 'fields_id': field.id, 'value': value,", "[{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence':", "value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=',", "record) value = account and 'account.account,' + str(acc_template_ref[account.id]) or False", "_ import logging _logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit =", "'account.journal,%s' % value.id, } properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id',", "todo_list = [ # Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id',", "vals = { 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id': field.id, 'value':", "('company_id', '=', company.id)], limit=1) if not properties: # create the", "# the property exist: modify it properties.write(vals) else: # create", "('type', '=', 'general')], limit=1) if value: field = self.env['ir.model.fields'].search([('name', '=',", "the property PropertyObj.create(vals) todo_list = [ # Property Stock Accounts", "company.id, 'fields_id': field.id, 'value': value, } properties = PropertyObj.search([('name', '=',", "Property Stock Journal value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=',", "'=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1) if", "logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model def generate_journals(self, acc_template_ref,", "company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1) if value:", "res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] # Property", "'name': 'property_stock_journal', 'company_id': 
company.id, 'fields_id': field.id, 'value': 'account.journal,%s' % value.id,", "'=', 'account.account')], limit=1) vals = { 'name': record, 'company_id': company.id,", "if properties: # the property exist: modify it properties.write(vals) else:", "False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self,", "'=', 'general')], limit=1) if value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'),", "Journal value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type',", "'=', company.id)]) if properties: # the property exist: modify it", "elif not properties.value_reference: # update the property if False properties.write(vals)", "See LICENSE file for full copyright and licensing details. from", "'=', company.id)], limit=1) if not properties: # create the property", "Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}] return", "utf-8 -*- # Part of Odoo. See LICENSE file for", "logging _logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model", "[ # Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for", "account and 'account.account,' + str(acc_template_ref[account.id]) or False if value: field", "{ 'name': record, 'company_id': company.id, 'fields_id': field.id, 'value': value, }", "'general')], limit=1) if value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model',", "licensing details. 
from odoo import api, models, _ import logging", "super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None): res", "not properties.value_reference: # update the property if False properties.write(vals) return", "'property_stock_journal'), ('company_id', '=', company.id)]) if properties: # the property exist:", "'=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1) vals", "('relation', '=', 'account.journal')], limit=1) vals = { 'name': 'property_stock_journal', 'company_id':", "= super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] # Property Stock", "record), ('company_id', '=', company.id)], limit=1) if not properties: # create", "= [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False,", "journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref,", "'account.account,' + str(acc_template_ref[account.id]) or False if value: field = self.env['ir.model.fields'].search([('name',", "acc_template_ref, company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj =", "'name': record, 'company_id': company.id, 'fields_id': field.id, 'value': value, } properties", "% value.id, } properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=',", "for record in todo_list: account = getattr(self, record) value =", "models, _ import logging _logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit", "= PropertyObj.search([('name', '=', record), 
('company_id', '=', company.id)], limit=1) if not", "full copyright and licensing details. from odoo import api, models,", "'code': 'STJ', 'favorite': False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company,", "return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None):", "company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property']", "if value: field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'),", "'favorite': False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def", "'=', 'account.journal')], limit=1) vals = { 'name': 'property_stock_journal', 'company_id': company.id,", "acc_template_ref, company, journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general',", "'property_stock_journal', 'company_id': company.id, 'fields_id': field.id, 'value': 'account.journal,%s' % value.id, }", "property PropertyObj.create(vals) elif not properties.value_reference: # update the property if", "properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1) if", "# Part of Odoo. See LICENSE file for full copyright", "file for full copyright and licensing details. from odoo import", "import api, models, _ import logging _logger = logging.getLogger(__name__) class", "field.id, 'value': 'account.journal,%s' % value.id, } properties = PropertyObj.search([('name', '=',", "-*- coding: utf-8 -*- # Part of Odoo. 
See LICENSE", "PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)]) if properties: # the", "'value': value, } properties = PropertyObj.search([('name', '=', record), ('company_id', '=',", "record, 'company_id': company.id, 'fields_id': field.id, 'value': value, } properties =", "'STJ'), ('type', '=', 'general')], limit=1) if value: field = self.env['ir.model.fields'].search([('name',", "PropertyObj.create(vals) todo_list = [ # Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id',", "not properties: # create the property PropertyObj.create(vals) elif not properties.value_reference:", "# -*- coding: utf-8 -*- # Part of Odoo. See", "import logging _logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\"", "AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model def generate_journals(self, acc_template_ref, company, journals_dict=None):", "'=', 'STJ'), ('type', '=', 'general')], limit=1) if value: field =", "-*- # Part of Odoo. 
See LICENSE file for full", "properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)]) if properties:", "} properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1)", "= [ # Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ]", "= \"account.chart.template\" @api.model def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add =", "'=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1) vals", "properties.value_reference: # update the property if False properties.write(vals) return res", "'STJ', 'favorite': False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add)", "PropertyObj.create(vals) elif not properties.value_reference: # update the property if False", "'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in todo_list: account =", "@api.model def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add = [{'name': _('Inventory", "Odoo. See LICENSE file for full copyright and licensing details.", "{ 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id': field.id, 'value': 'account.journal,%s' %", "# create the property PropertyObj.create(vals) todo_list = [ # Property", "account = getattr(self, record) value = account and 'account.account,' +", "details. 
from odoo import api, models, _ import logging _logger", "create the property PropertyObj.create(vals) elif not properties.value_reference: # update the", "self.env['ir.property'] # Property Stock Journal value = self.env['account.journal'].search([('company_id', '=', company.id),", "company=company) PropertyObj = self.env['ir.property'] # Property Stock Journal value =", "create the property PropertyObj.create(vals) todo_list = [ # Property Stock", "and licensing details. from odoo import api, models, _ import", "_('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}]", "'=', record), ('company_id', '=', company.id)], limit=1) if not properties: #", "vals = { 'name': record, 'company_id': company.id, 'fields_id': field.id, 'value':", "PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)], limit=1) if not properties:", "and 'account.account,' + str(acc_template_ref[account.id]) or False if value: field =", "of Odoo. See LICENSE file for full copyright and licensing", "properties: # create the property PropertyObj.create(vals) elif not properties.value_reference: #", "('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1) vals = {", "value = account and 'account.account,' + str(acc_template_ref[account.id]) or False if", "Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in todo_list: account", "'type': 'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}] return super(AccountChartTemplate,", "generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'), 'type':", "Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in todo_list:", "api, models, _ import logging _logger = logging.getLogger(__name__) class 
AccountChartTemplate(models.Model):", "class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model def generate_journals(self, acc_template_ref, company,", "= account and 'account.account,' + str(acc_template_ref[account.id]) or False if value:", "for full copyright and licensing details. from odoo import api,", "# Property Stock Accounts 'property_stock_account_input_categ_id', 'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record", "str(acc_template_ref[account.id]) or False if value: field = self.env['ir.model.fields'].search([('name', '=', record),", "or False if value: field = self.env['ir.model.fields'].search([('name', '=', record), ('model',", "limit=1) if not properties: # create the property PropertyObj.create(vals) elif", "self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')], limit=1)", "exist: modify it properties.write(vals) else: # create the property PropertyObj.create(vals)", "odoo import api, models, _ import logging _logger = logging.getLogger(__name__)", "<filename>odoo-13.0/addons/stock_account/models/account_chart_template.py # -*- coding: utf-8 -*- # Part of Odoo.", "LICENSE file for full copyright and licensing details. 
from odoo", "= self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')],", "+ str(acc_template_ref[account.id]) or False if value: field = self.env['ir.model.fields'].search([('name', '=',", "company, journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code':", "'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref,", "'product.category'), ('relation', '=', 'account.journal')], limit=1) vals = { 'name': 'property_stock_journal',", "if not properties: # create the property PropertyObj.create(vals) elif not", "value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation',", "'property_stock_valuation_account_id', ] for record in todo_list: account = getattr(self, record)", "it properties.write(vals) else: # create the property PropertyObj.create(vals) todo_list =", "Stock Journal value = self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'),", "else: # create the property PropertyObj.create(vals) todo_list = [ #", "def generate_properties(self, acc_template_ref, company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company)", "'account.journal')], limit=1) vals = { 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id':", "company.id)], limit=1) if not properties: # create the property PropertyObj.create(vals)", "generate_properties(self, acc_template_ref, company, property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj", "from odoo import api, models, _ import logging _logger =", "modify it 
properties.write(vals) else: # create the property PropertyObj.create(vals) todo_list", "_logger = logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model def", "('code', '=', 'STJ'), ('type', '=', 'general')], limit=1) if value: field", "# create the property PropertyObj.create(vals) elif not properties.value_reference: # update", "\"account.chart.template\" @api.model def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add = [{'name':", "the property PropertyObj.create(vals) elif not properties.value_reference: # update the property", "= self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')],", "journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ',", "company.id, 'fields_id': field.id, 'value': 'account.journal,%s' % value.id, } properties =", "= self.env['account.journal'].search([('company_id', '=', company.id), ('code', '=', 'STJ'), ('type', '=', 'general')],", "'general', 'code': 'STJ', 'favorite': False, 'sequence': 8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref,", "'property_stock_account_output_categ_id', 'property_stock_valuation_account_id', ] for record in todo_list: account = getattr(self,", "'company_id': company.id, 'fields_id': field.id, 'value': 'account.journal,%s' % value.id, } properties", "self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None): res =", "# Property Stock Journal value = self.env['account.journal'].search([('company_id', '=', company.id), ('code',", "value.id, } properties = PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)])", "= getattr(self, record) value = account and 
'account.account,' + str(acc_template_ref[account.id])", "def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add = [{'name': _('Inventory Valuation'),", "record), ('model', '=', 'product.category'), ('relation', '=', 'account.account')], limit=1) vals =", "'fields_id': field.id, 'value': 'account.journal,%s' % value.id, } properties = PropertyObj.search([('name',", "('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1) vals = {", "('relation', '=', 'account.account')], limit=1) vals = { 'name': record, 'company_id':", "coding: utf-8 -*- # Part of Odoo. See LICENSE file", "limit=1) vals = { 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id': field.id,", "company.id)]) if properties: # the property exist: modify it properties.write(vals)", "= PropertyObj.search([('name', '=', 'property_stock_journal'), ('company_id', '=', company.id)]) if properties: #", "the property exist: modify it properties.write(vals) else: # create the", "super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] # Property Stock Journal", "record in todo_list: account = getattr(self, record) value = account", "value, } properties = PropertyObj.search([('name', '=', record), ('company_id', '=', company.id)],", "property exist: modify it properties.write(vals) else: # create the property", "] for record in todo_list: account = getattr(self, record) value", "Part of Odoo. 
See LICENSE file for full copyright and", "company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company, property_list=None): res = super(AccountChartTemplate,", "'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1) vals =", "'=', 'property_stock_journal'), ('company_id', '=', company.id)]) if properties: # the property", "_inherit = \"account.chart.template\" @api.model def generate_journals(self, acc_template_ref, company, journals_dict=None): journal_to_add", "property_list=None): res = super(AccountChartTemplate, self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] #", "value: field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation',", "('company_id', '=', company.id)]) if properties: # the property exist: modify", "field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=', 'product.category'), ('relation', '=',", "limit=1) vals = { 'name': record, 'company_id': company.id, 'fields_id': field.id,", "journal_to_add = [{'name': _('Inventory Valuation'), 'type': 'general', 'code': 'STJ', 'favorite':", "limit=1) if value: field = self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=',", "self.env['ir.model.fields'].search([('name', '=', 'property_stock_journal'), ('model', '=', 'product.category'), ('relation', '=', 'account.journal')], limit=1)", "= logging.getLogger(__name__) class AccountChartTemplate(models.Model): _inherit = \"account.chart.template\" @api.model def generate_journals(self,", "= { 'name': 'property_stock_journal', 'company_id': company.id, 'fields_id': field.id, 'value': 'account.journal,%s'", "field.id, 'value': value, } properties = PropertyObj.search([('name', '=', record), ('company_id',", "} properties = PropertyObj.search([('name', '=', 'property_stock_journal'), 
('company_id', '=', company.id)]) if", "getattr(self, record) value = account and 'account.account,' + str(acc_template_ref[account.id]) or", "todo_list: account = getattr(self, record) value = account and 'account.account,'", "self).generate_properties(acc_template_ref=acc_template_ref, company=company) PropertyObj = self.env['ir.property'] # Property Stock Journal value", "False if value: field = self.env['ir.model.fields'].search([('name', '=', record), ('model', '=',", "8}] return super(AccountChartTemplate, self).generate_journals(acc_template_ref=acc_template_ref, company=company, journals_dict=journal_to_add) def generate_properties(self, acc_template_ref, company," ]
[ "blobs def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list,", "Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4,", "raise ValueError(\"sampler should be an instance of \" \"torch.utils.data.Sampler, but", "ndarray\". lists = [] for blobs in list_of_blobs: lists.append({'data' :", "and not self.drop_last: yield batch def __len__(self): if self.drop_last: return", "import torch.utils.data as data import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader", "be an instance of \" \"torch.utils.data.Sampler, but got sampler={}\" .format(sampler))", "the RATIO same for each minibatch on each GPU. Note:", "size would be less than ``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3,", "as torch_sampler from torch.utils.data.dataloader import default_collate from torch._six import int_classes", "self._num_classes = num_classes self.training = training self.DATA_SIZE = len(self._roidb) def", "list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return", "key in blobs: # if key != 'roidb': # blobs[key]", "= batch_size self.drop_last = drop_last def __iter__(self): batch = []", "isinstance(batch_size, bool) or \\ batch_size <= 0: raise ValueError(\"batch_size should", "a boolean value, but got \" \"drop_last={}\".format(drop_last)) self.sampler = sampler", "# Include leftovers for i in range(num_minibatch): left_idx = i", "Squeeze batch dim # for key in blobs: # if", "__init__(self, sampler, batch_size, drop_last): if not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler", "#TODO: Check if minibatch is valid ? If not, abandon", "cfg.TRAIN.SCALES containing SINGLE scale. 
Since all prepared images will have", "= [] if len(batch) > 0 and not self.drop_last: yield", "range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) *", "of \"list of ndarray\". lists = [] for blobs in", "1: # for ratio > 1, we preserve the rightmost", "is ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE scale.", "i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)]", "yield batch batch = [] if len(batch) > 0 and", "__len__(self): if self.drop_last: return len(self.sampler) // self.batch_size else: return (len(self.sampler)", "[self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch", "= i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH -", "__init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index = ratio_index self.num_data", "can't be batch into a tensor. # So we keep", "cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for key in minibatch: Batch[key].append(minibatch[key]) return", "ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler", "blobs in list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels'", "list of minibatches A batch contains NUM_GPUS minibatches and image", "[] for blobs in list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois' :", "import get_minibatch import utils.blob as blob_utils # from model.rpn.bbox_transform import", "for key in list_of_blobs[0]} # Because roidb consists of entries", "import utils.blob as blob_utils # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes", "'rois' : blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for i in range(0,", "batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool): raise ValueError(\"drop_last 
should be a", "minibatches A batch contains NUM_GPUS minibatches and image size in", "1, we preserve the rightmost in each batch. target_ratio =", "we need to stack smaples from each minibatch seperately. \"\"\"", "Base sampler. batch_size (int): Size of mini-batch. drop_last (bool): If", "_worker_loop in torch.utils.data.dataloader.py. # Squeeze batch dim # for key", "1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class", "1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing", "in each batch. target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] > 1:", "for ratio > 1, we preserve the rightmost in each", "bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True): self._roidb", "ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch ratio list ratio_list_minibatch =", "should be a boolean value, but got \" \"drop_last={}\".format(drop_last)) self.sampler", "to stack smaples from each minibatch seperately. \"\"\" Batch =", "= blobs['data'].squeeze(axis=0) return blobs def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list):", "each batch. target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] > 1: #", "``True``, the sampler will drop the last batch if its", "during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE scale. 
Since all", "in range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1)", "model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes,", "ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index = ratio_index self.num_data =", "ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list", "+ cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for key in minibatch: Batch[key].append(minibatch[key])", ": blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for i in range(0, len(list_of_blobs),", "sampler={}\" .format(sampler)) if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or", "/ cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i in range(num_minibatch): left_idx", "# Because roidb consists of entries of variable length, it", "would be less than ``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False))", "batch_size <= 0: raise ValueError(\"batch_size should be a positive integeral", "class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True): self._roidb = roidb", "batch contains NUM_GPUS minibatches and image size in different minibatch", "ratio cross 1, we make it to be 1. target_ratio", "\"\"\"Given the ratio_list, we want to make the RATIO same", "minibatch on each GPU. 
Note: this only work for 1)", "\"\"\"Stack samples seperately and return a list of minibatches A", "import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import default_collate from torch._six", "# for ratio < 1, we preserve the leftmost in", "elif ratio_list[left_idx] > 1: # for ratio > 1, we", "cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for", "should be a positive integeral value, \" \"but got batch_size={}\".format(batch_size))", "import default_collate from torch._six import int_classes as _int_classes from core.config", "* cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE", "lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for key in minibatch:", "that. \"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch =", "instance of \" \"torch.utils.data.Sampler, but got sampler={}\" .format(sampler)) if not", "target_ratio = ratio_list[right_idx] else: # for ratio cross 1, we", "\"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE", "data import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import default_collate from", "variable length, it can't be batch into a tensor. #", "# Difference: batch.append(int(idx)) if len(batch) == self.batch_size: yield batch batch", "list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4, 5], [6,", "torch_sampler from torch.utils.data.dataloader import default_collate from torch._six import int_classes as", "self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple): index, ratio = index_tuple", "= [self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes) #TODO: Check if", "of indices. Args: sampler (Sampler): Base sampler. 
batch_size (int): Size", "blobs.pop('labels')}) for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i", "for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES", "as _int_classes from core.config import cfg from roi_data.minibatch import get_minibatch", "MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index =", "size in different minibatch may be different. Hence, we need", "rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] #", "= 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def", "= blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def __len__(self): return", "numpy as np import numpy.random as npr import torch import", "in blobs: # if key != 'roidb': # blobs[key] =", "target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] > 1: # for ratio", "right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1)", "the sampler will drop the last batch if its size", "BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to yield a mini-batch of indices.", "batch if its size would be less than ``batch_size`` Example:", "If not, abandon it. 
# Need to change _worker_loop in", "dim # for key in blobs: # if key !=", "this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob`", "left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH", "[[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]", "the ratio_list, we want to make the RATIO same for", "torch import torch.utils.data as data import torch.utils.data.sampler as torch_sampler from", "import math import numpy as np import numpy.random as npr", "7, 8]] \"\"\" def __init__(self, sampler, batch_size, drop_last): if not", "it can't be batch into a tensor. # So we", "= npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate", "in list_of_blobs[0]} # Because roidb consists of entries of variable", "we can pad and batch images base on that. \"\"\"", "from torch._six import int_classes as _int_classes from core.config import cfg", "if not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should be an instance", "for i in range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx", "roidb, num_classes, training=True): self._roidb = roidb self._num_classes = num_classes self.training", "= cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class", "# re-calculate minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(),", "if ratio_list[right_idx] < 1: # for ratio < 1, we", "[] if len(batch) > 0 and not self.drop_last: yield batch", "batch images base on that. \"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch", "training self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple): index, ratio =", "Need to change _worker_loop in torch.utils.data.dataloader.py. # Squeeze batch dim", "each minibatch seperately. 
\"\"\" Batch = {key: [] for key", "blobs: # if key != 'roidb': # blobs[key] = blobs[key].squeeze(axis=0)", "ratio_list[left_idx] elif ratio_list[left_idx] > 1: # for ratio > 1,", "> 0 and not self.drop_last: yield batch def __len__(self): if", "= get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch is valid ?", "1, 2], [3, 4, 5], [6, 7, 8]] \"\"\" def", "RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True): self._roidb = roidb self._num_classes", "drop_last=True)) [[0, 1, 2], [3, 4, 5], [6, 7, 8]]", "got batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool): raise ValueError(\"drop_last should be", "1, we preserve the leftmost in each batch. target_ratio =", "batch. target_ratio = ratio_list[right_idx] else: # for ratio cross 1,", "isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \\ batch_size <= 0:", "int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i in range(num_minibatch):", "__init__(self, roidb, num_classes, training=True): self._roidb = roidb self._num_classes = num_classes", "self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to yield a mini-batch", "a list of minibatches A batch contains NUM_GPUS minibatches and", "__getitem__(self, index_tuple): index, ratio = index_tuple single_db = [self._roidb[index]] blobs,", "# Need to change _worker_loop in torch.utils.data.dataloader.py. # Squeeze batch", "[6, 7, 8], [9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1,", "index_tuple single_db = [self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes) #TODO:", "5], [6, 7, 8], [9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0,", "ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE scale. 
Since", "drop_last=False)) [[0, 1, 2], [3, 4, 5], [6, 7, 8],", "cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE -", "from core.config import cfg from roi_data.minibatch import get_minibatch import utils.blob", "cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i in range(num_minibatch): left_idx =", "len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index", "not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \\ batch_size <=", "the rightmost in each batch. target_ratio = ratio_list[right_idx] else: #", "= [] for blobs in list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois'", "blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def __len__(self): return self.DATA_SIZE", "> 1: # for ratio > 1, we preserve the", "it to be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio", "drop_last def __iter__(self): batch = [] for idx in self.sampler:", "[3, 4, 5], [6, 7, 8], [9]] >>> list(BatchSampler(range(10), batch_size=3,", "minibatch = default_collate(mini_list) for key in minibatch: Batch[key].append(minibatch[key]) return Batch", "\"torch.utils.data.Sampler, but got sampler={}\" .format(sampler)) if not isinstance(batch_size, _int_classes) or", "= sampler self.batch_size = batch_size self.drop_last = drop_last def __iter__(self):", "4, 5], [6, 7, 8]] \"\"\" def __init__(self, sampler, batch_size,", "we preserve the leftmost in each batch. target_ratio = ratio_list[left_idx]", "batch_size, drop_last): if not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should be", "stack smaples from each minibatch seperately. \"\"\" Batch = {key:", "1, DATA_SIZE - 1) if ratio_list[right_idx] < 1: # for", "- 1) // self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and", "in the type of \"list of ndarray\". 
lists = []", "batch dim # for key in blobs: # if key", "be batch into a tensor. # So we keep roidb", "batch = [] if len(batch) > 0 and not self.drop_last:", "- 1, DATA_SIZE - 1) if ratio_list[right_idx] < 1: #", "batch_size (int): Size of mini-batch. drop_last (bool): If ``True``, the", "bool): raise ValueError(\"drop_last should be a boolean value, but got", "ratio_list[right_idx] else: # for ratio cross 1, we make it", "cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE", "np import numpy.random as npr import torch import torch.utils.data as", "= drop_last def __iter__(self): batch = [] for idx in", "len(self._roidb) def __getitem__(self, index_tuple): index, ratio = index_tuple single_db =", "can pad and batch images base on that. \"\"\" DATA_SIZE", "self.ratio_index[rand_perm] # re-calculate minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return", "got sampler={}\" .format(sampler)) if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool)", "of entries of variable length, it can't be batch into", "self.batch_size: yield batch batch = [] if len(batch) > 0", "index, ratio = index_tuple single_db = [self._roidb[index]] blobs, valid =", "numpy.random as npr import torch import torch.utils.data as data import", "= ratio_list self.ratio_index = ratio_index self.num_data = len(ratio_list) def __iter__(self):", "smaples from each minibatch seperately. \"\"\" Batch = {key: []", "mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for key", "get_minibatch import utils.blob as blob_utils # from model.rpn.bbox_transform import bbox_transform_inv,", "# Squeeze batch dim # for key in blobs: #", "batch. target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] > 1: # for", "roidb in the type of \"list of ndarray\". 
lists =", "If ``True``, the sampler will drop the last batch if", ">>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4, 5],", "prepared images will have same min side length of cfg.TRAIN.SCALES[0],", "torch_sampler.Sampler): raise ValueError(\"sampler should be an instance of \" \"torch.utils.data.Sampler,", "< 1, we preserve the leftmost in each batch. target_ratio", "4, 5], [6, 7, 8], [9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True))", "not isinstance(drop_last, bool): raise ValueError(\"drop_last should be a boolean value,", "or isinstance(batch_size, bool) or \\ batch_size <= 0: raise ValueError(\"batch_size", "and batch images base on that. \"\"\" DATA_SIZE = len(ratio_list)", "in list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels' :", "return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list =", "npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch", "ratio_list, we want to make the RATIO same for each", "self.sampler: batch.append(idx) # Difference: batch.append(int(idx)) if len(batch) == self.batch_size: yield", "1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self,", "batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4, 5], [6, 7,", "[] for key in list_of_blobs[0]} # Because roidb consists of", "1, 2], [3, 4, 5], [6, 7, 8], [9]] >>>", "import torch import torch.utils.data as data import torch.utils.data.sampler as torch_sampler", "of cfg.TRAIN.SCALES[0], we can pad and batch images base on", "seperately. \"\"\" Batch = {key: [] for key in list_of_blobs[0]}", "abandon it. # Need to change _worker_loop in torch.utils.data.dataloader.py. 
#", "only work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and", "contains NUM_GPUS minibatches and image size in different minibatch may", "entries of variable length, it can't be batch into a", "if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \\ batch_size", "ratio > 1, we preserve the rightmost in each batch.", "def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to", "self._roidb = roidb self._num_classes = num_classes self.training = training self.DATA_SIZE", "drop the last batch if its size would be less", "each batch. target_ratio = ratio_list[right_idx] else: # for ratio cross", "a mini-batch of indices. Args: sampler (Sampler): Base sampler. batch_size", "rightmost in each batch. target_ratio = ratio_list[right_idx] else: # for", "roi_data.minibatch import get_minibatch import utils.blob as blob_utils # from model.rpn.bbox_transform", "ratio_list self.ratio_index = ratio_index self.num_data = len(ratio_list) def __iter__(self): rand_perm", "self.training = training self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple): index,", "we preserve the rightmost in each batch. target_ratio = ratio_list[right_idx]", "cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we want to make the RATIO", "sampler (Sampler): Base sampler. batch_size (int): Size of mini-batch. drop_last", "ratio_index): self.ratio_list = ratio_list self.ratio_index = ratio_index self.num_data = len(ratio_list)", "seperately and return a list of minibatches A batch contains", "return (len(self.sampler) + self.batch_size - 1) // self.batch_size def collate_minibatch(list_of_blobs):", "different minibatch may be different. 
Hence, we need to stack", "blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def __len__(self): return self.DATA_SIZE def", ": blobs.pop('labels')}) for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list =", "\" \"torch.utils.data.Sampler, but got sampler={}\" .format(sampler)) if not isinstance(batch_size, _int_classes)", "self.ratio_index = ratio_index self.num_data = len(ratio_list) def __iter__(self): rand_perm =", "to make the RATIO same for each minibatch on each", "positive integeral value, \" \"but got batch_size={}\".format(batch_size)) if not isinstance(drop_last,", "!= 'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return", "def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and return a list of", "ratio_list[left_idx] > 1: # for ratio > 1, we preserve", "may be different. Hence, we need to stack smaples from", "to change _worker_loop in torch.utils.data.dataloader.py. # Squeeze batch dim #", "torch.utils.data as data import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import", "return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we want to", ">>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4, 5],", "batch_size self.drop_last = drop_last def __iter__(self): batch = [] for", "and return a list of minibatches A batch contains NUM_GPUS", "for idx in self.sampler: batch.append(idx) # Difference: batch.append(int(idx)) if len(batch)", "training=True): self._roidb = roidb self._num_classes = num_classes self.training = training", "images will have same min side length of cfg.TRAIN.SCALES[0], we", "Include leftovers for i in range(num_minibatch): left_idx = i *", "in torch.utils.data.dataloader.py. # Squeeze batch dim # for key in", "2) cfg.TRAIN.SCALES containing SINGLE scale. 
Since all prepared images will", "ratio < 1, we preserve the leftmost in each batch.", "idx in self.sampler: batch.append(idx) # Difference: batch.append(int(idx)) if len(batch) ==", "scale. Since all prepared images will have same min side", "iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another", "length, it can't be batch into a tensor. # So", "npr import torch import torch.utils.data as data import torch.utils.data.sampler as", "1) // self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and return", "be a boolean value, but got \" \"drop_last={}\".format(drop_last)) self.sampler =", "we keep roidb in the type of \"list of ndarray\".", "if len(batch) > 0 and not self.drop_last: yield batch def", "# blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def", "+ self.batch_size - 1) // self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples", "change _worker_loop in torch.utils.data.dataloader.py. # Squeeze batch dim # for", "valid = get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch is valid", "= min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1) if", "\" \"but got batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool): raise ValueError(\"drop_last", "integeral value, \" \"but got batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool):", "DATA_SIZE - 1) if ratio_list[right_idx] < 1: # for ratio", "(int): Size of mini-batch. drop_last (bool): If ``True``, the sampler", "for each minibatch on each GPU. Note: this only work", "minibatch is valid ? If not, abandon it. 
# Need", "DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE /", "= self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch ratio list", "blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH):", "want to make the RATIO same for each minibatch on", "valid ? If not, abandon it. # Need to change", "images base on that. \"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch =", "self.ratio_list = ratio_list self.ratio_index = ratio_index self.num_data = len(ratio_list) def", "ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include", "sampler will drop the last batch if its size would", "class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index", "i in range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH right_idx =", "make it to be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] =", "last batch if its size would be less than ``batch_size``", "than ``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1, 2],", "ValueError(\"sampler should be an instance of \" \"torch.utils.data.Sampler, but got", "self.batch_size else: return (len(self.sampler) + self.batch_size - 1) // self.batch_size", "roidb consists of entries of variable length, it can't be", "image size in different minibatch may be different. Hence, we", "cfg.TRAIN.SCALES[0], we can pad and batch images base on that.", "minibatch seperately. 
\"\"\" Batch = {key: [] for key in", "\"\"\" def __init__(self, sampler, batch_size, drop_last): if not isinstance(sampler, torch_sampler.Sampler):", "self.drop_last: return len(self.sampler) // self.batch_size else: return (len(self.sampler) + self.batch_size", "same min side length of cfg.TRAIN.SCALES[0], we can pad and", "side length of cfg.TRAIN.SCALES[0], we can pad and batch images", "(Sampler): Base sampler. batch_size (int): Size of mini-batch. drop_last (bool):", "samples seperately and return a list of minibatches A batch", "keep roidb in the type of \"list of ndarray\". lists", "each minibatch on each GPU. Note: this only work for", "we make it to be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)]", "1: # for ratio < 1, we preserve the leftmost", "< 1: # for ratio < 1, we preserve the", "single_db = [self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes) #TODO: Check", "def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we", "boolean value, but got \" \"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size", "Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored during", "less than ``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1,", "import cfg from roi_data.minibatch import get_minibatch import utils.blob as blob_utils", "not, abandon it. 
# Need to change _worker_loop in torch.utils.data.dataloader.py.", "all prepared images will have same min side length of", "\" \"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size = batch_size self.drop_last =", "import numpy as np import numpy.random as npr import torch", "= num_classes self.training = training self.DATA_SIZE = len(self._roidb) def __getitem__(self,", "for ratio < 1, we preserve the leftmost in each", "ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self):", "RATIO same for each minibatch on each GPU. Note: this", "= [] for idx in self.sampler: batch.append(idx) # Difference: batch.append(int(idx))", "lists = [] for blobs in list_of_blobs: lists.append({'data' : blobs.pop('data'),", "into a tensor. # So we keep roidb in the", "mini-batch of indices. Args: sampler (Sampler): Base sampler. batch_size (int):", "if self.drop_last: return len(self.sampler) // self.batch_size else: return (len(self.sampler) +", "isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should be an instance of \"", "np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for", "if key != 'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] =", "Since all prepared images will have same min side length", "sampler. batch_size (int): Size of mini-batch. drop_last (bool): If ``True``,", "on each GPU. 
Note: this only work for 1) cfg.TRAIN.MAX_SIZE", "blobs['data'].squeeze(axis=0) return blobs def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given", "return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps", "Difference: batch.append(int(idx)) if len(batch) == self.batch_size: yield batch batch =", "leftovers for i in range(num_minibatch): left_idx = i * cfg.TRAIN.IMS_PER_BATCH", "== self.batch_size: yield batch batch = [] if len(batch) >", "be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch", "cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler):", "def __init__(self, roidb, num_classes, training=True): self._roidb = roidb self._num_classes =", "blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs def __len__(self):", "'labels' : blobs.pop('labels')}) for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list", "Args: sampler (Sampler): Base sampler. batch_size (int): Size of mini-batch.", "same for each minibatch on each GPU. 
Note: this only", "an instance of \" \"torch.utils.data.Sampler, but got sampler={}\" .format(sampler)) if", "def __len__(self): if self.drop_last: return len(self.sampler) // self.batch_size else: return", "collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and return a list of minibatches", "# if key != 'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data']", "ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def __len__(self): return self.num_data", "of minibatches A batch contains NUM_GPUS minibatches and image size", "minibatch may be different. Hence, we need to stack smaples", "__len__(self): return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to yield", "in each batch. target_ratio = ratio_list[right_idx] else: # for ratio", "8]] \"\"\" def __init__(self, sampler, batch_size, drop_last): if not isinstance(sampler,", "need to stack smaples from each minibatch seperately. \"\"\" Batch", "= self.ratio_index[rand_perm] # re-calculate minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list)", "__iter__(self): batch = [] for idx in self.sampler: batch.append(idx) #", "SINGLE scale. Since all prepared images will have same min", "pad and batch images base on that. \"\"\" DATA_SIZE =", "of \" \"torch.utils.data.Sampler, but got sampler={}\" .format(sampler)) if not isinstance(batch_size,", "of ndarray\". 
lists = [] for blobs in list_of_blobs: lists.append({'data'", "__len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we want", "Hence, we need to stack smaples from each minibatch seperately.", "in self.sampler: batch.append(idx) # Difference: batch.append(int(idx)) if len(batch) == self.batch_size:", "list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels' : blobs.pop('labels')})", "ValueError(\"drop_last should be a boolean value, but got \" \"drop_last={}\".format(drop_last))", "Because roidb consists of entries of variable length, it can't", "# from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self,", "0: raise ValueError(\"batch_size should be a positive integeral value, \"", "list_of_blobs[0]} # Because roidb consists of entries of variable length,", "= int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i in", "list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1, 2], [3, 4, 5], [6,", "len(self.sampler) // self.batch_size else: return (len(self.sampler) + self.batch_size - 1)", "get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch is valid ? If", "num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers for i", "or \\ batch_size <= 0: raise ValueError(\"batch_size should be a", "length of cfg.TRAIN.SCALES[0], we can pad and batch images base", "got \" \"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size = batch_size self.drop_last", "a tensor. # So we keep roidb in the type", "_int_classes from core.config import cfg from roi_data.minibatch import get_minibatch import", "\"list of ndarray\". lists = [] for blobs in list_of_blobs:", "def __init__(self, ratio_list, ratio_index): self.ratio_list = ratio_list self.ratio_index = ratio_index", "the leftmost in each batch. 
target_ratio = ratio_list[left_idx] elif ratio_list[left_idx]", "= len(self._roidb) def __getitem__(self, index_tuple): index, ratio = index_tuple single_db", "be a positive integeral value, \" \"but got batch_size={}\".format(batch_size)) if", "batch into a tensor. # So we keep roidb in", "will drop the last batch if its size would be", "consists of entries of variable length, it can't be batch", "self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we want to make", "self.num_data = len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list =", "7, 8], [9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1, 2],", "// self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and return a", "cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1) if ratio_list[right_idx] < 1:", "= len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm]", "in different minibatch may be different. 
Hence, we need to", "a positive integeral value, \" \"but got batch_size={}\".format(batch_size)) if not", "from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb,", "[6, 7, 8]] \"\"\" def __init__(self, sampler, batch_size, drop_last): if", "raise ValueError(\"batch_size should be a positive integeral value, \" \"but", "(bool): If ``True``, the sampler will drop the last batch", "i * cfg.TRAIN.IMS_PER_BATCH right_idx = min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1,", "len(batch) == self.batch_size: yield batch batch = [] if len(batch)", "roidb self._num_classes = num_classes self.training = training self.DATA_SIZE = len(self._roidb)", "for ratio cross 1, we make it to be 1.", "``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0, 1, 2], [3,", "target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index): self.ratio_list", "1) if ratio_list[right_idx] < 1: # for ratio < 1,", "<= 0: raise ValueError(\"batch_size should be a positive integeral value,", "leftmost in each batch. 
target_ratio = ratio_list[left_idx] elif ratio_list[left_idx] >", "= roidb self._num_classes = num_classes self.training = training self.DATA_SIZE =", "8], [9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1, 2], [3,", "self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately and return a list", "ratio = index_tuple single_db = [self._roidb[index]] blobs, valid = get_minibatch(single_db,", "have same min side length of cfg.TRAIN.SCALES[0], we can pad", "def cal_minibatch_ratio(ratio_list): \"\"\"Given the ratio_list, we want to make the", "return self.num_data class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to yield a", "{key: [] for key in list_of_blobs[0]} # Because roidb consists", "yield batch def __len__(self): if self.drop_last: return len(self.sampler) // self.batch_size", "def __init__(self, sampler, batch_size, drop_last): if not isinstance(sampler, torch_sampler.Sampler): raise", "GPU. Note: this only work for 1) cfg.TRAIN.MAX_SIZE is ignored", "# for key in blobs: # if key != 'roidb':", "= len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH))", "batch def __len__(self): if self.drop_last: return len(self.sampler) // self.batch_size else:", "index_tuple): index, ratio = index_tuple single_db = [self._roidb[index]] blobs, valid", "return a list of minibatches A batch contains NUM_GPUS minibatches", "def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index =", "re-calculate minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist()))", "ratio_list[right_idx] < 1: # for ratio < 1, we preserve", "1, we make it to be 1. target_ratio = 1", "_int_classes) or isinstance(batch_size, bool) or \\ batch_size <= 0: raise", "of mini-batch. 
drop_last (bool): If ``True``, the sampler will drop", "indices. Args: sampler (Sampler): Base sampler. batch_size (int): Size of", "core.config import cfg from roi_data.minibatch import get_minibatch import utils.blob as", "but got \" \"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size = batch_size", "be different. Hence, we need to stack smaples from each", "but got sampler={}\" .format(sampler)) if not isinstance(batch_size, _int_classes) or isinstance(batch_size,", "NUM_GPUS minibatches and image size in different minibatch may be", "be less than ``batch_size`` Example: >>> list(BatchSampler(range(10), batch_size=3, drop_last=False)) [[0,", "the type of \"list of ndarray\". lists = [] for", "torch.utils.data.dataloader import default_collate from torch._six import int_classes as _int_classes from", "math import numpy as np import numpy.random as npr import", "2], [3, 4, 5], [6, 7, 8]] \"\"\" def __init__(self,", "batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4, 5], [6, 7,", "ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch ratio", "containing SINGLE scale. Since all prepared images will have same", "if minibatch is valid ? If not, abandon it. #", "drop_last): if not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should be an", "range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch =", ": blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for i", "each GPU. Note: this only work for 1) cfg.TRAIN.MAX_SIZE is", "= lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list) for key in", "minibatches and image size in different minibatch may be different.", "sampler to yield a mini-batch of indices. 
Args: sampler (Sampler):", "not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should be an instance of", "its size would be less than ``batch_size`` Example: >>> list(BatchSampler(range(10),", "len(batch) > 0 and not self.drop_last: yield batch def __len__(self):", "torch.utils.data.dataloader.py. # Squeeze batch dim # for key in blobs:", "batch = [] for idx in self.sampler: batch.append(idx) # Difference:", "from each minibatch seperately. \"\"\" Batch = {key: [] for", "ratio_index self.num_data = len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list", "cfg from roi_data.minibatch import get_minibatch import utils.blob as blob_utils #", "= ratio_index self.num_data = len(ratio_list) def __iter__(self): rand_perm = npr.permutation(self.num_data)", "will have same min side length of cfg.TRAIN.SCALES[0], we can", "\"\"\" Batch = {key: [] for key in list_of_blobs[0]} #", "raise ValueError(\"drop_last should be a boolean value, but got \"", "on that. \"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch", "return blobs def __len__(self): return self.DATA_SIZE def cal_minibatch_ratio(ratio_list): \"\"\"Given the", "target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler):", "is valid ? If not, abandon it. # Need to", "= {key: [] for key in list_of_blobs[0]} # Because roidb", "= ratio_list[left_idx] elif ratio_list[left_idx] > 1: # for ratio >", "another sampler to yield a mini-batch of indices. 
Args: sampler", "minibatch ratio list ratio_list_minibatch = cal_minibatch_ratio(ratio_list) return iter(zip(ratio_index.tolist(), ratio_list_minibatch.tolist())) def", "batch.append(int(idx)) if len(batch) == self.batch_size: yield batch batch = []", "work for 1) cfg.TRAIN.MAX_SIZE is ignored during `prep_im_for_blob` and 2)", "self.drop_last: yield batch def __len__(self): if self.drop_last: return len(self.sampler) //", "ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list,", "not self.drop_last: yield batch def __len__(self): if self.drop_last: return len(self.sampler)", "if its size would be less than ``batch_size`` Example: >>>", "len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) #", "ValueError(\"batch_size should be a positive integeral value, \" \"but got", "= index_tuple single_db = [self._roidb[index]] blobs, valid = get_minibatch(single_db, self._num_classes)", "utils.blob as blob_utils # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class", "make the RATIO same for each minibatch on each GPU.", "preserve the leftmost in each batch. target_ratio = ratio_list[left_idx] elif", "len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch = default_collate(mini_list)", "> 1, we preserve the rightmost in each batch. target_ratio", "import int_classes as _int_classes from core.config import cfg from roi_data.minibatch", "[9]] >>> list(BatchSampler(range(10), batch_size=3, drop_last=True)) [[0, 1, 2], [3, 4,", "num_classes self.training = training self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple):", "`prep_im_for_blob` and 2) cfg.TRAIN.SCALES containing SINGLE scale. 
Since all prepared", "\"but got batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool): raise ValueError(\"drop_last should", "if len(batch) == self.batch_size: yield batch batch = [] if", "as data import torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import default_collate", "def __getitem__(self, index_tuple): index, ratio = index_tuple single_db = [self._roidb[index]]", "lists.append({'data' : blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for", "the last batch if its size would be less than", "isinstance(drop_last, bool): raise ValueError(\"drop_last should be a boolean value, but", "= np.empty((DATA_SIZE,)) num_minibatch = int(np.ceil(DATA_SIZE / cfg.TRAIN.IMS_PER_BATCH)) # Include leftovers", "cross 1, we make it to be 1. target_ratio =", "for blobs in list_of_blobs: lists.append({'data' : blobs.pop('data'), 'rois' : blobs.pop('rois'),", "= target_ratio return ratio_list_minibatch class MinibatchSampler(torch_sampler.Sampler): def __init__(self, ratio_list, ratio_index):", "return len(self.sampler) // self.batch_size else: return (len(self.sampler) + self.batch_size -", "as npr import torch import torch.utils.data as data import torch.utils.data.sampler", "(len(self.sampler) + self.batch_size - 1) // self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack", "[] for idx in self.sampler: batch.append(idx) # Difference: batch.append(int(idx)) if", "batch batch = [] if len(batch) > 0 and not", "batch.append(idx) # Difference: batch.append(int(idx)) if len(batch) == self.batch_size: yield batch", ".format(sampler)) if not isinstance(batch_size, _int_classes) or isinstance(batch_size, bool) or \\", "in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i + cfg.TRAIN.IMS_PER_BATCH)] minibatch", "key != 'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0)", "yield a mini-batch of indices. 
Args: sampler (Sampler): Base sampler.", "5], [6, 7, 8]] \"\"\" def __init__(self, sampler, batch_size, drop_last):", "num_classes, training=True): self._roidb = roidb self._num_classes = num_classes self.training =", "0 and not self.drop_last: yield batch def __len__(self): if self.drop_last:", "self.batch_size = batch_size self.drop_last = drop_last def __iter__(self): batch =", "blob_utils # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def", "and 2) cfg.TRAIN.SCALES containing SINGLE scale. Since all prepared images", "from torch.utils.data.dataloader import default_collate from torch._six import int_classes as _int_classes", "2], [3, 4, 5], [6, 7, 8], [9]] >>> list(BatchSampler(range(10),", "key in list_of_blobs[0]} # Because roidb consists of entries of", "blobs, valid = get_minibatch(single_db, self._num_classes) #TODO: Check if minibatch is", "preserve the rightmost in each batch. target_ratio = ratio_list[right_idx] else:", "to yield a mini-batch of indices. Args: sampler (Sampler): Base", "sampler, batch_size, drop_last): if not isinstance(sampler, torch_sampler.Sampler): raise ValueError(\"sampler should", "torch.utils.data.sampler as torch_sampler from torch.utils.data.dataloader import default_collate from torch._six import", "\\ batch_size <= 0: raise ValueError(\"batch_size should be a positive", "? If not, abandon it. # Need to change _worker_loop", "__iter__(self): rand_perm = npr.permutation(self.num_data) ratio_list = self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm]", "different. 
Hence, we need to stack smaples from each minibatch", "= training self.DATA_SIZE = len(self._roidb) def __getitem__(self, index_tuple): index, ratio", "# for ratio > 1, we preserve the rightmost in", "import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True):", "self.ratio_list[rand_perm] ratio_index = self.ratio_index[rand_perm] # re-calculate minibatch ratio list ratio_list_minibatch", "sampler self.batch_size = batch_size self.drop_last = drop_last def __iter__(self): batch", "[[0, 1, 2], [3, 4, 5], [6, 7, 8]] \"\"\"", "should be an instance of \" \"torch.utils.data.Sampler, but got sampler={}\"", "self.drop_last = drop_last def __iter__(self): batch = [] for idx", "for i in range(0, len(list_of_blobs), cfg.TRAIN.IMS_PER_BATCH): mini_list = lists[i:(i +", "blobs.pop('data'), 'rois' : blobs.pop('rois'), 'labels' : blobs.pop('labels')}) for i in", "Size of mini-batch. drop_last (bool): If ``True``, the sampler will", "# So we keep roidb in the type of \"list", "Check if minibatch is valid ? If not, abandon it.", "self.sampler = sampler self.batch_size = batch_size self.drop_last = drop_last def", "of variable length, it can't be batch into a tensor.", "value, but got \" \"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size =", "Batch = {key: [] for key in list_of_blobs[0]} # Because", "clip_boxes class RoiDataLoader(data.Dataset): def __init__(self, roidb, num_classes, training=True): self._roidb =", "to be 1. target_ratio = 1 ratio_list_minibatch[left_idx:(right_idx+1)] = target_ratio return", "self.batch_size - 1) // self.batch_size def collate_minibatch(list_of_blobs): \"\"\"Stack samples seperately", "bool) or \\ batch_size <= 0: raise ValueError(\"batch_size should be", "r\"\"\"Wraps another sampler to yield a mini-batch of indices. Args:", "min side length of cfg.TRAIN.SCALES[0], we can pad and batch", "self._num_classes) #TODO: Check if minibatch is valid ? 
If not,", "tensor. # So we keep roidb in the type of", "else: # for ratio cross 1, we make it to", "value, \" \"but got batch_size={}\".format(batch_size)) if not isinstance(drop_last, bool): raise", "[3, 4, 5], [6, 7, 8]] \"\"\" def __init__(self, sampler,", "- 1) if ratio_list[right_idx] < 1: # for ratio <", "So we keep roidb in the type of \"list of", "we want to make the RATIO same for each minibatch", "# for ratio cross 1, we make it to be", "'roidb': # blobs[key] = blobs[key].squeeze(axis=0) blobs['data'] = blobs['data'].squeeze(axis=0) return blobs", "for key in blobs: # if key != 'roidb': #", "default_collate from torch._six import int_classes as _int_classes from core.config import", "and image size in different minibatch may be different. Hence,", "int_classes as _int_classes from core.config import cfg from roi_data.minibatch import", "mini-batch. drop_last (bool): If ``True``, the sampler will drop the", "else: return (len(self.sampler) + self.batch_size - 1) // self.batch_size def", "it. # Need to change _worker_loop in torch.utils.data.dataloader.py. # Squeeze", "A batch contains NUM_GPUS minibatches and image size in different", "as blob_utils # from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes class RoiDataLoader(data.Dataset):", "import numpy.random as npr import torch import torch.utils.data as data", "// self.batch_size else: return (len(self.sampler) + self.batch_size - 1) //", "\"drop_last={}\".format(drop_last)) self.sampler = sampler self.batch_size = batch_size self.drop_last = drop_last", "* cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1) if ratio_list[right_idx] <", "def __iter__(self): batch = [] for idx in self.sampler: batch.append(idx)", "drop_last (bool): If ``True``, the sampler will drop the last", "base on that. \"\"\" DATA_SIZE = len(ratio_list) ratio_list_minibatch = np.empty((DATA_SIZE,))", "torch._six import int_classes as _int_classes from core.config import cfg from", "type of \"list of ndarray\". 
lists = [] for blobs", "class BatchSampler(torch_sampler.BatchSampler): r\"\"\"Wraps another sampler to yield a mini-batch of", "min((i+1) * cfg.TRAIN.IMS_PER_BATCH - 1, DATA_SIZE - 1) if ratio_list[right_idx]", "if not isinstance(drop_last, bool): raise ValueError(\"drop_last should be a boolean", "as np import numpy.random as npr import torch import torch.utils.data", "= ratio_list[right_idx] else: # for ratio cross 1, we make", "from roi_data.minibatch import get_minibatch import utils.blob as blob_utils # from" ]
[ "# Let's rescale back the coefficients returned by sklearn before", "< 0 _, _, coefs = \\ linear_model.lars_path(X, y, return_path=True,", "we need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False)", "normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) #", "the optimal alpha # increases as the number of samples", "= np.dot(X.T, y) for method in 'lar', 'lasso': output =", "[linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values", "[True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user input to", "assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation", "0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T", "0 def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking", "positive option for all estimator classes default_parameter = {'fit_intercept': False}", "# Note: When normalize is equal to True, R returns", "import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal", "estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active,", "of the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso')", "same function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars':", "[-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### #", "Lars-Lasso algorithm does not converge to # the least-squares-solution for", "X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_", "as compared with the above test without the positive option.", "tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def 
test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an", "def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1,", "['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram(", "alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic =", "test_lasso_lars_path_length(): # Test that the path length of the LassoLars", "TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following should", "receiving multidimensional y do the right thing Y = np.vstack([y,", "1.14 rcond = None if LooseVersion(np.__version__) >= '1.14' else -1", "np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def", "coefficients in # their original units, that is, they are", "= linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test", "decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso", "output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy", "coordinate descent give the # same results. X = 3", "# 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True", "# normalized data X = diabetes.data alphas, _, lasso_path =", "results. 
X = 3 * diabetes.data alphas, _, lasso_path =", "in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a", "# This test is basically a copy of the above", "= linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): #", "G = np.dot(X.T, X) Xy = np.dot(X.T, y) for method", "min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and", "= np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp =", "0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346,", "tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if a", "= np.dot(X, w) sigma = 0.2 y += sigma *", "active set assert ocur == X.shape[1] finally: sys.stdout = old_stdout", "ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1,", "collinearity in input X = np.array([[3., 3., 1.], [2., 2.,", "- lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers", "import warnings from distutils.version import LooseVersion import numpy as np", "_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout", "np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False,", "pytest from scipy import linalg from sklearn.model_selection import train_test_split from", "in this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map", "that, therefore, we need to do this step before comparing", "using the following code: # # library(lars) # model_lasso_lars =", "a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error", "are rescaled back, whereas sklearn # does not do that,", "0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1:", "# same results. 
X = 3 * diabetes.data alphas, _,", "warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X", "# trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0,", "object by checking that the optimal alpha # increases as", "when early stopping is used. # (test : before, in", "w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data", "alpha # increases as the number of samples increases. #", "linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error =", "* linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def", "* linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X,", "linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_,", "y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs')", "# strategy for this but this is no longer the", "len(w) == 0 def test_lasso_lars_ic(): # Test the LassoLarsIC object", "Xy = np.dot(X.T, y) n_samples = y.size def test_simple(): #", "diabetes dataset # ensure that we get negative coefficients when", "is just a # property of the given dataset, with", "# assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning,", "_, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0]", "give the same answers # Note it used to be", "not actually guaranteed in general and is just a #", "in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha =", "getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def", "def test_no_path(): # Test that the ``return_path=False`` option returns the", "with the LassoLars # implementation available 
in R (lars library)", "= linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r,", "zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X,", "@ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional y", "def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x", "@pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def test_lars_lstsq():", "Scenario 1: Let's compare R vs sklearn when fit_intercept=False and", "on the diabetes dataset # ensure that we get negative", "np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False,", "way # as coordinate descent Lasso y = [5, 0,", "error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same", "lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4,", "of this function # we do the test on the", "<reponame>andywu113/fuhe_predict<gh_stars>1-10 import warnings from distutils.version import LooseVersion import numpy as", "can go into the active set assert ocur == X.shape[1]", "the LARS has to go # far in the path", "= 0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200,", "linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_,", "= linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ #", "def test_lars_cv(): # Test the LassoLarsCV object by checking that", "the coefficients returned by sklearn before comparing # against the", "_lars_path_residues, LassoLarsIC # TODO: use another dataset that has 
multiple", "of precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1", "0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees", "np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur =", "rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ =", "# similar test, with the classifiers for alpha in np.linspace(1e-2,", "n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def", "to # the smallest alpha reached by the Lars-Lasso algorithm", "X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_)", "with precomputed Gram and Xy gives the right answer G", "np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that", "the path length of the LassoLars is right lasso =", "positive=True # for method 'lar' (default) and lasso # Once", "Y) Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_,", "y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for", "descent give the # same results. 
X = 3 *", "1e-3) # same test, with normalized data X = diabetes.data", "the positive parameter on the lars_path method # the estimator", "estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params)", "_, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout =", "X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path(", "model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by sklearn", "this function # we do the test on the diabetes", "need to do this step before comparing # their results.", "numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives", "following code: # # library(lars) # model_lasso_lars = lars(X, t(y),", "estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive():", "was until at least version 0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X,", "assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from", "The default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars,", "np.c_[X, x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5)", "and normalize=True # Let's generate the data used in the", "0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X,", "# and all positive when positive=True # for method 'lar'", "= linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X =", "# https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as", "= linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same test,", 
"memory mapping on large input, the # fold data is", "method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False)", "np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y)", "# assure that at least some features get added if", "of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y)", "assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation", "estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore:", "the middle part, the comparison of coefficient values # for", "2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234,", "c, a in zip(lasso_path.T, alphas): if a == 0: continue", "is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'],", "alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha,", "assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default", "StringIO import sys old_stdout = sys.stdout try: sys.stdout = StringIO()", "step before comparing # their results. 
########################################################################### # # The", "active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1),", "f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:,", "in the path to converge, and check that LARS and", "sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0)", "np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False):", "features get added if necessary # test for 6d2b4c #", "coefs.min() < 0 _, _, coefs = \\ linear_model.lars_path(X, y,", "G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples =", "0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0,", "= np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]])", "_, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef,", "import LooseVersion import numpy as np import pytest from scipy", "coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef", "decreasing # also test verbose output from io import StringIO", "the # same results. 
X = 3 * diabetes.data alphas,", "{}} for estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator", "\"\"\" Test that user input regarding copy_X is not being", "returns the coefficients in # their original units, that is,", "1e-3 ocur = len(cov[C - eps < abs(cov)]) if i", "sys.stdout = old_stdout def test_simple_precomputed(): # The same, with precomputed", "normalized data X = 3 * diabetes.data alphas, _, lasso_path", "going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings", "linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_,", "0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514,", "above test without the positive option. This is due #", "active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k", "y_train, y_test): # The following should not fail despite copy=False", "= linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for", "which the LARS has to go # far in the", "[ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False),", "# Test the LassoLarsCV object by checking that the optimal", "The range of alphas chosen for coefficient comparison here is", "in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True,", "y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre):", "smallest alpha reached by the Lars-Lasso algorithm and start to", "on a very ill-conditioned design, and check that # it", "linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _, _,", "See below. 
# not normalized data X = 3 *", "and normalize=False # 2) fit_intercept=True and normalize=True # Let's generate", "estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for", "alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas,", "-1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars =", "sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X, y,", "def test_simple_precomputed(): # The same, with precomputed Gram matrix _,", "= np.c_[X, x, x] # add correlated features lars_cv =", "70, 100 k = 5 X = rng.randn(n, m) w", "alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd", "########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user", "import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater", "check that LARS and coordinate # descent give the same", "X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X,", "2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6,", "chosen for coefficient comparison here is restricted # as compared", "Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ ==", "'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X,", "y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a", "-5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2", "should not fail despite copy=False 
_lars_path_residues(X_train, y_train, X_test, y_test, copy=False)", "the data used in the bug report 7778 y =", "== X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The", "X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features", "2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ###########################################################################", "assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from", "X = np.array([[3., 3., 1.], [2., 2., 0.], [1., 1.,", "the ``return_path=False`` option with Gram remains correct alphas_, _, coef_path_", "assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from", "# increases as the number of samples increases. # This", "y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation():", "linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y)", "linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): #", "+= sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef", "the given dataset, with the given steps chosen. old_alpha =", "y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha =", "an adaptations. See below. 
# not normalized data X =", "output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1,", "2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), #", "np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual **", "check that # it does not blow up, and stays", "= estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_)", "# not normalized data X = 3 * diabetes.data alphas,", "# numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso", "test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) #", "diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not", "lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T,", "_, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True,", "axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:,", "= linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert", "right thing Y = np.vstack([y, y ** 2]).T n_targets =", "_, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso',", "= coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. 
* 3.))", "normalized data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X,", "0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311,", "= datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T,", "= np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X =", "tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1]", "correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) ==", "= sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ =", "The default of the `iid`') # 0.22 def test_lars_path_positive_constraint(): #", "linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd():", "linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_", "[True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples,", "of samples increases. 
# This property is not actually guaranteed", "output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G,", "lars_path and previous lasso output style # under these conditions.", "fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12)", "large input, the # fold data is in read-only mode", "singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 =", "# Avoid FutureWarning about default value change when numpy >=", "= lars(X, t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2", "code: # # library(lars) # model_lasso_lars = lars(X, t(y), type=\"lasso\",", "[-1e-32, 0, 0], [1, 1, 1]] y = [10, 10,", "= [5, 0, 5] for X in ( [[5, 0],", "{}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv') #", "skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True,", "checking that # - some good features are selected. #", "** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd *", "1) + 1) y = np.dot(X, w) sigma = 0.2", "assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars", "The default value of cv') # 0.22 def test_lars_cv(): #", "Test that Lars gives least square solution at the end", "= linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error", "add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic =", "'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy,", "obj_cd = ((1. / (2. * 3.)) * linalg.norm(y -", "this step before comparing # their results. 
########################################################################### # #", "option with Gram and Xy remains # correct X, y", "alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option with", "the correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y,", "test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x =", "option returns the correct output alphas_, _, coef_path_ = linear_model.lars_path(", "old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter():", "intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 =", "and is just a # property of the given dataset,", "y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_", "path length of the LassoLars is right lasso = linear_model.LassoLars()", "# The range of alphas chosen for coefficient comparison here", "6 + 1 in a Lars going down to 6", "1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_", "y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of", "200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y)", "from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing", "is to keep covariances tied and decreasing # also test", "normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3)", "despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default", "assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is", "the following # scenarios: # 1) fit_intercept=False 
and normalize=False #", "_, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1,", "# classes in this same function here default_parameter = {'fit_intercept':", "by Efron et al 2004. The coefficients are typically in", "default __init__ value \"\"\" lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0)", "linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test", "def test_lars_path_readonly_data(): # When using automated memory mapping on large", "y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for", "test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate descent", "linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T):", "by checking that the optimal alpha # increases as the", "with the given steps chosen. old_alpha = 0 lars_cv =", "17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0,", "= linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1])", "user input to .fit for copy_X overrides default __init__ value", "note above) temp = X - np.mean(X, axis=0) normx =", "normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj", "R result was obtained using the following code: # #", "copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\"", "1) y = np.dot(X, w) sigma = 0.2 y +=", "= np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) 
assert_less(np.max(nonzero_bic),", "dataset, with the given steps chosen. old_alpha = 0 lars_cv", "the right thing Y = np.vstack([y, y ** 2]).T n_targets", "ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): #", "square solution at the end # of the path _,", "linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X,", "the positive option of all estimator # classes in this", "alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G,", "3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',", "= linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When", "do that, therefore, we need to do this step before", "use another dataset that has multiple drops diabetes = datasets.load_diabetes()", "estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615", "True, R returns the coefficients in # their original units,", "+ np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_))", "a singular matrix X1 = np.array([[1, 1.], [1., 1.]]) y1", "result was obtained using the following code: # # library(lars)", "linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's", "R returns the coefficients in # their original units, that", "/ (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf =", "sklearn # does not do that, therefore, we need to", "estimator # classes in this same function here default_parameter =", "to converge, and check that LARS and coordinate # descent", "assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC", "2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, 
normalize=False)", "@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier',", "dataset that has multiple drops diabetes = datasets.load_diabetes() X, y", "coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. *", "X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same,", "early stopping is used. # (test : before, in the", "0, 0, 0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412,", "cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def", "the test on the diabetes dataset # ensure that we", "obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that", "np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /=", "1, (100, 5)) X_copy = X.copy() y = X[:, 2]", "make use of this function # we do the test", "off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_", "linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that", "assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated", "a range of alphas, we had to make an adaptations.", "= linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X", "same, with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path(", "from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing", "the positive option for all estimator classes default_parameter = {'fit_intercept':", "* len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 +", "3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')", "answers # Note it used to be the case that", "X = np.c_[X, x, x] # add correlated features lars_cv", "sys.stdout try: sys.stdout = StringIO() _, _, coef_path_ = 
linear_model.lars_path(", "for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar',", "we do the test on the diabetes dataset # ensure", "2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1.", "assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare", "X, y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res", "linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c,", "output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def", "- np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1))", "lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False])", "default value change when numpy >= 1.14 rcond = None", "n = 5 H = 1. / (np.arange(1, n +", "test_lars_precompute(classifier): # Check for different values of precompute G =", "/ (2. * len(X)) * linalg.norm(y - np.dot(X, coef)) **", ".0001 def objective_function(coef): return (1. / (2. 
* len(X)) *", "that LassoLars and Lasso using coordinate descent give the #", "cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier):", "lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,", "method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T,", "finally: sys.stdout = old_stdout def test_simple_precomputed(): # The same, with", "return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X,", "adaptations. See below. # not normalized data X = 3", "_, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i,", "same results when using the positive option # This test", "to do this step before comparing # their results. ###########################################################################", "their results. 
########################################################################### # # The R result was obtained", "input regarding copy_X is not being overridden (it was until", "alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit,", "the # fold data is in read-only mode # This", "alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) -", "len(cov[C - eps < abs(cov)]) if i < X.shape[1]: assert", "coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value", "-47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X,", "import StringIO import sys old_stdout = sys.stdout try: sys.stdout =", "to a solution given # by the coordinate descent solver", "y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k],", "= linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error =", "compare R vs sklearn when fit_intercept=False and # normalize=False ###########################################################################", "-1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that", "# ensure that we get negative coefficients when positive=False #", "in general and is just a # property of the", "linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) # same test, with", "1 else: # no more than max_pred variables can go", "1] alpha = .0001 def objective_function(coef): return (1. 
/ (2.", "test_lars_cv(): # Test the LassoLarsCV object by checking that the", "skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned", "range of alphas, we had to make an adaptations. See", "'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different", "_, _, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=False)", "use the coefs to compute the objective function, # we", "Principle of Lars is to keep covariances tied and decreasing", "[2., 2., 0.], [1., 1., 0]]) y = np.array([1., 0.,", "@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_lars_cv():", "close to a solution given # by the coordinate descent", "np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on", "= 5 X = rng.randn(n, m) w = np.zeros((m, 1))", "y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. 
+ 1e-8))", "= i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) +", "* linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1 *", "bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic", "assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars", "the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) <", "else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond`", "7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x =", "with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42) x = rng.randn(len(y))", "Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators", "1e-8)) def test_lars_add_features(): # assure that at least some features", "a solution given # by the coordinate descent solver #", "coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual =", "version 0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0)", "rcond = None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq", "0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0,", "import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model,", "estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:,", "* (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars", "if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y)", "return (1. / (2. 
* len(X)) * linalg.norm(y - np.dot(X,", "value \"\"\" lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X =", "_, _, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=True)", "same result as lars_path and previous lasso output style #", "(it was until at least version 0.21) \"\"\" lasso_lars =", "path to converge, and check that LARS and coordinate #", "# This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data", "a very ill-conditioned design, and check that # it does", "= ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]:", "from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing", "np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso',", "linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic(): #", "``return_path=False`` option with Gram and Xy remains # correct X,", "before comparing # their results. ########################################################################### # # The R", "y - np.dot(X, coef_) cov = np.dot(X.T, res) C =", "X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y)", "scipy import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import", "the case that Lars had to use the drop for", "estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The", "consistency test that checks that LARS Lasso is handling rank", "np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _,", "alphas): if a == 0: continue lasso_cd.alpha = a lasso_cd.fit(X,", "does not blow up, and stays somewhat close to a", "linear_model.LassoLarsCV() for length in (400, 200, 100): X = diabetes.data[:length]", "Let's generate the data used in the bug report 7778", "implementation available in R (lars library) under the following #", "-0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966,", "n_samples = y.size def test_simple(): # Principle of Lars is", "[10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path", "length in (400, 200, 100): X = diabetes.data[:length] y =", "0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853,", "= np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0,", "positive=False) assert coefs.min() < 0 _, _, coefs = \\", "t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0, 0,", "x, x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X,", "_lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the", "linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for", "descent give the # same results when early stopping is", "alphas is always decreasing assert 
np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned():", "y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6,", "supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1)", "0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -3.577397088285891,", "X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X)", "for a range of alphas, we had to make an", "the path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq", "method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that", "is due # to the circumstance that the Lars-Lasso algorithm", "np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for", "normalize=True # # Note: When normalize is equal to True,", "alphas chosen for coefficient comparison here is restricted # as", "def test_lars_lstsq(): # Test that Lars gives least square solution", "that lasso_path (using lars_path output style) gives # the same", "give the # same results when early stopping is used.", "turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X,", "@pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for", "same results when early stopping is used. # (test :", "'lasso': output = linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y,", "and previous lasso output style # under these conditions. 
rng", "linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should", "linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ]", "err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data", "y).coef_ for precompute in [True, False, 'auto', None]: clf =", "coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that estimators", "match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _,", "features are selected. # - alpha_bic > alpha_aic # -", "# same results when using the positive option # This", "@pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 def test_lars_path_positive_constraint():", "sigma = 0.2 y += sigma * rng.rand(*y.shape) y =", "(100, 5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X,", "0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531,", "numpy >= 1.14 rcond = None if LooseVersion(np.__version__) >= '1.14'", "= np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy =", "when numpy >= 1.14 rcond = None if LooseVersion(np.__version__) >=", "= len(cov[C - eps < abs(cov)]) if i < X.shape[1]:", "normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): #", "coefficients when positive=False # and all positive when positive=True #", "def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and", "linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef", "# The R result was obtained using the following code:", "with precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X,", "checks that LARS Lasso is handling rank # deficient 
input", "lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def", "= linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_ in", "method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore:", "sure it's bounded n_samples = 10 X = rng.rand(n_samples, 5)", "1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y =", "= np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T,", "library(lars) # model_lasso_lars2 = lars(X, t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE,", "linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of", "estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha':", "m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) *", "diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G =", "coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef =", "+ 1 in a Lars going down to 6 #", "implementation agrees with the LassoLars # implementation available in R", "= LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1,", "from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another dataset", "coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2. *", "# by the coordinate descent solver # Also test that", "# the estimator classes just make use of this function", "- np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2", "clf.fit(X1, y) # Avoid FutureWarning about default value change when", "linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value change", "linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout for i,", "= linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that", "X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1])", "i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] =", "Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None,", "X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert", "# Test that lars_path with no X and Gram raises", "lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The", "the ``return_path=False`` option with Gram and Xy remains # correct", "0], [1e-32, 0, 0], [0, 0, 1]] ): # To", "y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1)", "generate the data used in the bug report 7778 y", "that is, they are rescaled back, whereas sklearn # does", "cv') # 0.22 def test_lars_cv(): # Test the LassoLarsCV object", "1e-4] for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X,", "for the positive parameter on the lars_path method # the", "np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators = [", "The coefficients are typically in congruence up to # the", "len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken", "(1. 
+ 1e-8)) def test_lars_add_features(): # assure that at least", "handling rank # deficient input data (with n_features < rank)", "abs(cov)]) if i < X.shape[1]: assert ocur == i +", "= default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y)", "Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k],", "= linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y)", "return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X", "coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars =", "by checking that # - some good features are selected.", "sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import", "# we need to turn off normalization lars = linear_model.LassoLars(.1,", "lars_path with precomputed Gram and Xy gives the right answer", "import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap", "linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:,", "y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input", "test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate descent", "estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k])", "raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y,", "correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G)", "y) err = linalg.norm(clf1.coef_ - clf2.coef_) 
assert_less(err, 1e-3) # normalized", "- some good features are selected. # - alpha_bic >", "lars_path with no X and Gram raises exception Xy =", "has multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data,", "# it does not blow up, and stays somewhat close", "from scipy import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing", "{'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map:", "+ alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning,", "test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit,", "R vs sklearn when fit_intercept=False and # normalize=False ########################################################################### #", "is robust to collinearity in input X = np.array([[3., 3.,", "[1, 0]]) def test_rank_deficient_design(): # consistency test that checks that", "y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2,", "import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns", "just make use of this function # we do the", "# # The R result was obtained using the following", "y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): #", "5 H = 1. / (np.arange(1, n + 1) +", "these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with", "coordinate # descent give the same answers # Note it", "was obtained using the following code: # # library(lars) #", "3.) 
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1", "Test that the ``return_path=False`` option returns the correct output alphas_,", "converge, and check that LARS and coordinate # descent give", "0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233,", "y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1]", "= [ linear_model.LassoLars(), linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False),", "cov = np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3", "np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X = np.c_[X,", "0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577,", "drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target G", "y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c,", "1. / (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf", "mapping on large input, the # fold data is in", "comparing # their results. 
########################################################################### # # The R result", "= linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test", "ocur = len(cov[C - eps < abs(cov)]) if i <", "# their original units, that is, they are rescaled back,", "-7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169,", "do the right thing Y = np.vstack([y, y ** 2]).T", "for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond`", "alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False,", "test is basically a copy of the above with additional", "else: # no more than max_pred variables can go into", "0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871,", "rescale back the coefficients returned by sklearn before comparing #", "_, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef,", "model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_", "test_all_precomputed(): # Test that lars_path with precomputed Gram and Xy", "lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path", "_, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9)", "gives # the same result as lars_path and previous lasso", "that Lars had to use the drop for good #", "in input X = np.array([[3., 3., 1.], [2., 2., 0.],", "5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y)", "np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657],", "* rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = 
linear_model.lars_path(X,", "########################################################################### # # The R result was obtained using the", "sys old_stdout = sys.stdout try: sys.stdout = StringIO() _, _,", "== np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test", "/ (2. * 3.) * linalg.norm(y - np.dot(X, coef_lars_)) **", "Test the LassoLarsIC object by checking that # - some", "== 0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error =", "method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a", "+ 1) y = np.dot(X, w) sigma = 0.2 y", "== i + 1 else: # no more than max_pred", "# (test : before, in the middle, and in the", "test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate descent", "coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov", "n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng", "copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22 def", "**params) estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): #", "optimal alpha # increases as the number of samples increases.", "assert_less(error, 0.01) # The range of alphas chosen for coefficient", "that user input regarding copy_X is not being overridden (it", "output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_,", "alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False,", "y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in", "the path X1 = 3 * X # use un-normalized", "linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_)", "positive=True) lasso_cd = 
linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in", "up, and stays somewhat close to a solution given #", "normalization for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X,", "for method 'lar' (default) and lasso # Once deprecation of", "method='lar') for i, coef_ in enumerate(coef_path_.T): res = y -", "Efron et al 2004. The coefficients are typically in congruence", "are typically in congruence up to # the smallest alpha", "obtained using the following code: # # library(lars) # model_lasso_lars2", "X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute", "assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y)", "thing Y = np.vstack([y, y ** 2]).T n_targets = Y.shape[1]", "def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram", "_, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0.,", "LassoLars # implementation available in R (lars library) under the", "compare R vs sklearn when fit_intercept=True and # normalize=True #", "k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_)", "necessary # test for 6d2b4c # Hilbert matrix n =", "0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0,", "X = rng.randn(n, m) w = np.zeros((m, 1)) i =", "that Lars Lasso gives least square solution at the end", "copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): #", "need to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_", "under the following # 
scenarios: # 1) fit_intercept=False and normalize=False", "Test lasso lars on a very ill-conditioned design, and check", "# far in the path to converge, and check that", "r2 = np.array([[0, 0, 0, 0, 0], [0, 0, 0,", "exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None,", "sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another dataset that", "assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k],", "will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that", "n_samples = 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples)", "Test that Lars Lasso gives least square solution at the", "2).sum(), 1.) # just make sure it's bounded n_samples =", "20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 =", "positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) #", "linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that", "input to .fit for copy_X overrides default __init__ value \"\"\"", "len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method',", "+ 1e-8)) def test_lars_add_features(): # assure that at least some", "test, with normalized data X = diabetes.data alphas, _, lasso_path", "linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2():", "= y.size def test_simple(): # Principle of Lars is to", "_, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual", 
"linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8,", "= linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if", "0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0,", "with Gram and Xy remains # correct X, y =", "ill-conditioned design, and check that # it does not blow", "= coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1.", "# deficient input data (with n_features < rank) in the", "def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user input to .fit for", "multiple drops diabetes = datasets.load_diabetes() X, y = diabetes.data, diabetes.target", "normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. / (2.", "the same answers # Note it used to be the", "This test is basically a copy of the above with", "= 5 H = 1. / (np.arange(1, n + 1)", "linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6,", "coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2. * 3.)) *", "y = [10, 10, 1] alpha = .0001 def objective_function(coef):", "# option. However for the middle part, the comparison of", "/ (2. 
* 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) **", "[5, 0, 5] for X in ( [[5, 0], [0,", "H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose)", "linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'],", "of all estimator # classes in this same function here", "to # the least-squares-solution for small alphas, see 'Least Angle", "is the main test for the positive parameter on the", "al 2004. The coefficients are typically in congruence up to", "precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5))", "# Test that LassoLars and Lasso using coordinate descent give", "w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y", "objective function, # we need to turn off normalization lars", "0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0,", "( [[5, 0], [0, 5], [10, 10]], [[10, 10, 0],", "(1. / (2. * 3.) 
* linalg.norm(y - np.dot(X, coef_lars_))", "np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2,", "* (rng.rand(k, 1) + 1) y = np.dot(X, w) sigma", "normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ###########################################################################", "precompute G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 =", "Xy gives the right answer G = np.dot(X.T, X) Xy", "alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy,", "assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that estimators receiving", "= 1e-3 ocur = len(cov[C - eps < abs(cov)]) if", "linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas): if", "[0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148],", "26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True,", "gonna test the positive option for all estimator classes default_parameter", "StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10)", "Test that lars_path with no X and Gram raises exception", "function here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha':", "[10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0,", "alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False,", "normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back", "method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def 
test_collinearity():", "fold data is in read-only mode # This is a", "a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01)", "0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0,", "Xy = np.dot(X.T, y) for method in 'lar', 'lasso': output", "= rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ =", "# of the path _, _, coef_path_ = linear_model.lars_path(X, y,", "y_test): # The following should not fail despite copy=False _lars_path_residues(X_train,", "go into the active set assert ocur == X.shape[1] def", "normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X,", "assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_)", "'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params =", "= np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X,", "y) # Avoid FutureWarning about default value change when numpy", "rng = np.random.RandomState(42) x = rng.randn(len(y)) X = diabetes.data X", "selected. 
# - alpha_bic > alpha_aic # - n_nonzero_bic <", "normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0, 0,", "getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator", "assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the path length", "Lasso using coordinate descent give the # same results when", "= getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >= 0", "y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def", "6) # The path should be of length 6 +", "estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path =", "eps = 1e-3 ocur = len(cov[C - eps < abs(cov)])", "o1, o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso'])", "of the given dataset, with the given steps chosen. old_alpha", "y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy", "-4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0,", "assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure", "before, in the middle, and in the last part of", "linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that", "coordinate descent Lasso y = [5, 0, 5] for X", "# under these conditions. 
rng = np.random.RandomState(42) # Generate data", "alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y)", "= a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error,", "The following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test,", "0]) rng = np.random.RandomState(0) f = ignore_warnings _, _, coef_path_", "the comparison of coefficient values # for a range of", "np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy = X.copy()", "'lasso' _, _, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method,", "clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err =", "3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 + .1", "= np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_", "Lasso using coordinate descent give the # same results. X", "= f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X,", "# add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic", "= old_stdout for i, coef_ in enumerate(coef_path_.T): res = y", "7) @ignore_warnings def test_multitarget(): # Assure that estimators receiving multidimensional", "X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') #", "the R result (read the note above) temp = X", "following code: # # library(lars) # model_lasso_lars2 = lars(X, t(y),", "def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using coordinate", "########################################################################### ########################################################################### # Scenario 2: Let's compare R vs sklearn", "positive option. 
This is due # to the circumstance that", "Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def", "strategy for this but this is no longer the case", "lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) #", "assert ocur == X.shape[1] finally: sys.stdout = old_stdout def test_simple_precomputed():", "on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y)", "splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test,", "-3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0,", "results. ########################################################################### # # The R result was obtained using", "assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the", "in read-only mode # This is a non-regression test for:", "and Lasso using coordinate descent give the # same results.", "Test that LassoLars and Lasso using coordinate descent give the", "the last part of the path) alphas_min = [10, 0.9,", "= linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:,", "_assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2):", "not normalized data X = 3 * diabetes.data alphas, _,", "for the middle part, the comparison of coefficient values #", "7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2", "X, y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy", "Generate data n, m = 70, 100 k = 5", "1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp]", "situation in which the LARS has to go # far", "a non-regression test for: # 
https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y,", "X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',", "2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True,", "= X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert", "is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): #", "= train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train,", "1., 0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0)", "to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in", "see 'Least Angle Regression' # by Efron et al 2004.", "assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from", "X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent =", "np.random.RandomState(42) # Generate data n, m = 70, 100 k", "same answers # Note it used to be the case", "clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data alphas,", "the LassoLars # implementation available in R (lars library) under", "property of the given dataset, with the given steps chosen.", "assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence of alphas", "r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0],", "test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with", "np import pytest from scipy import linalg from sklearn.model_selection import", "method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T):", "LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5))", "input is a singular matrix X1 = np.array([[1, 1.], [1.,", 
"that the sequence of alphas is always decreasing assert np.all(np.diff(lasso.alphas_)", "linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check for different values of precompute", "= linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd =", "design, and check that # it does not blow up,", "a in zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha", "get negative coefficients when positive=False # and all positive when", "X) Xy = np.dot(X.T, y) for method in 'lar', 'lasso':", "use the drop for good # strategy for this but", "[True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user input regarding", "reached by the Lars-Lasso algorithm and start to # diverge", "########################################################################### # Scenario 2: Let's compare R vs sklearn when", "option. This is due # to the circumstance that the", "= linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8,", "See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2,", "y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha,", "lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0]", "_, coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9,", "def test_lars_precompute(classifier): # Check for different values of precompute G", "= linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def", "alphas, we had to make an adaptations. See below. 
#", "return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now we", "default value of cv') # 0.22 def test_lars_cv(): # Test", "'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng =", "datasets.load_diabetes() X, y = diabetes.data, diabetes.target G = np.dot(X.T, X)", "X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_", "alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore:", "X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y,", "all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars':", "for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X,", "by sklearn before comparing # against the R result (read", "def objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y", "0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561,", "LassoLarsIC object by checking that # - some good features", "[1e-32, 0, 0], [0, 0, 1]] ): # To be", "((1. / (2. 
* 3.)) * linalg.norm(y - np.dot(X, coef_cd_))", "path should be of length 6 + 1 in a", "of Lars is to keep covariances tied and decreasing #", "lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True,", "precomputed Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y,", "is basically a copy of the above with additional positive", "LassoLars and Lasso using coordinate descent give the # same", "0], [1, 0]]) def test_rank_deficient_design(): # consistency test that checks", "# we do the test on the diabetes dataset #", "output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got", "100 k = 5 X = rng.randn(n, m) w =", "y) alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy,", "assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv')", "5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y,", "Gram and Xy remains # correct X, y = 3", "== alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22", "[10, 10, 1] alpha = .0001 def objective_function(coef): return (1.", "19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062,", "@pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G,", "# scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True", "0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0,", "{'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value", "https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1", "data used in the bug report 7778 y = 
np.array([-6.45006793,", "_, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X,", "linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert", "# the smallest alpha reached by the Lars-Lasso algorithm and", "using coordinate descent give the # same results when using", "lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1,", "= np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2,", "for different values of precompute G = np.dot(X.T, X) clf", "linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha", "being overridden (it was until at least version 0.21) \"\"\"", "3., 1.], [2., 2., 0.], [1., 1., 0]]) y =", "don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error =", "= 10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _,", "0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params", "ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not", "rescaled back, whereas sklearn # does not do that, therefore,", "Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed", "linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path( X,", "deficient input data (with n_features < rank) in the same", "sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import", "0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733,", "cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for", "False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_", "LassoLarsIC # TODO: use 
another dataset that has multiple drops", "X in ( [[5, 0], [0, 5], [10, 10]], [[10,", "in enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov =", "len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2 + alpha", "obtained using the following code: # # library(lars) # model_lasso_lars", "the coefficients in # their original units, that is, they", "until at least version 0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False)", "# Check for different values of precompute G = np.dot(X.T,", "linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from", "converge to # the least-squares-solution for small alphas, see 'Least", "and Lasso using coordinate descent give the # same results", "in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min)", "2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404,", "lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8)", "alpha reached by the Lars-Lasso algorithm and start to #", "np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] #", "1)) lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_", "used in the bug report 7778 y = np.array([-6.45006793, -3.51251449,", "# Test that Lars Lasso gives least square solution at", "np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0,", "now we gonna test the positive option for all estimator", "y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that lars_path", "def test_all_precomputed(): # Test that lars_path with precomputed Gram and", "= diabetes.data X = np.c_[X, x, x] # add correlated", 
"0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X =", "positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include", "increases as the number of samples increases. # This property", "f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01)", "# the same result as lars_path and previous lasso output", "verbose output from io import StringIO import sys old_stdout =", "coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:,", "x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0,", "# also test verbose output from io import StringIO import", "y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length", "# same results when early stopping is used. # (test", "rng.normal(0, 1, (100, 5)) X_copy = X.copy() y = X[:,", "output_2, decimal=8) def test_singular_matrix(): # Test when input is a", "0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0,", "that the Lars-Lasso algorithm does not converge to # the", "# library(lars) # model_lasso_lars2 = lars(X, t(y), type=\"lasso\", intercept=TRUE, #", "robust to collinearity in input X = np.array([[3., 3., 1.],", "method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False)", "np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _, _,", "test, with the classifiers for alpha in np.linspace(1e-2, 1 -", "sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import", "estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred)", "and decreasing # also test verbose output from io import", "remains # correct X, y = 3 * diabetes.data, diabetes.target", "no longer the case with the 
# equality_tolerance checks X", "obj_lars = (1. / (2. * 3.) * linalg.norm(y -", "the LassoLarsCV object by checking that the optimal alpha #", "alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _,", "assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') #", "zip(lasso_path.T, alphas): if a == 0: continue lasso_cd.alpha = a", "= linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c,", "decimal=8) def test_singular_matrix(): # Test when input is a singular", "output style # under these conditions. rng = np.random.RandomState(42) #", "results when early stopping is used. # (test : before,", "sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC #", "= linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X =", "_, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]])", "assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\"", "coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) 
# just", "0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very", "as lars_path and previous lasso output style # under these", "1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2", "coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): #", "[[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]] y", "fit_intercept=True and normalize=True # Let's generate the data used in", "5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0]", "for the positive option of all estimator # classes in", "2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305,", "linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1.", "linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path(", "assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of length 6", "test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram remains", "it's bounded n_samples = 10 X = rng.rand(n_samples, 5) y", "default of the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this", "X = 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X,", "assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for", "clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default", "gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y)", "io import StringIO import sys old_stdout = sys.stdout try: sys.stdout", "# Test the LassoLarsIC object by checking that # -", "the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this is the", "rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0, m)", "y = [5, 0, 5] for X in ( [[5,", "5 X = rng.randn(n, m) w = np.zeros((m, 1)) i", "alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False``", "0, 1]] ): # To be able to use the", "solution given # by the coordinate descent solver # Also", "test_collinearity(): # Check that lars_path is robust to collinearity in", "(1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and", "lars(X, t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r =", "# normalize=True # # Note: When normalize is equal to", "linear_model.Lars(), # regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for", "library(lars) # model_lasso_lars = lars(X, t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE,", "# Test that Lars gives least square solution at the", "Test that the ``return_path=False`` option with Gram and Xy remains", "assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y", "np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _, coef_path_ =", "# trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r =", "i + 1 else: # no more than max_pred variables", "0 _, _, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method,", "R result (read the note above) temp = X -", "down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def", "and # normalize=True # # Note: When normalize is equal", "np.dot(X.T, X) Xy = np.dot(X.T, y) for method in 
'lar',", "lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error", "-0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461,", "np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent", "X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2, axis=0))", "y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also", "option of all estimator # classes in this same function", "in a Lars going down to 6 # non-zero coefs", "if i < X.shape[1]: assert ocur == i + 1", "Lasso is handling rank # deficient input data (with n_features", "and Xy gives the right answer G = np.dot(X.T, X)", "import numpy as np import pytest from scipy import linalg", "-47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y)", "not converge to # the least-squares-solution for small alphas, see", "the same way # as coordinate descent Lasso y =", "the following code: # # library(lars) # model_lasso_lars2 = lars(X,", "y) assert len(w) == 0 def test_lasso_lars_ic(): # Test the", "for c, a in zip(lasso_path.T, alphas): if a == 0:", "# testing the transmissibility for the positive option of all", "data X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y,", "The same, with precomputed Gram matrix _, _, coef_path_ =", "r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0,", "/= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False])", "for alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y,", "decimal=12) 
########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that", "a Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_),", "numpy deprecation def test_lars_lstsq(): # Test that Lars gives least", "# don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error", "Xy=Xy) def test_all_precomputed(): # Test that lars_path with precomputed Gram", "report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x", "in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef,", "y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The", "-4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False,", "solution at the end # of the path X1 =", "H = 1. / (np.arange(1, n + 1) + np.arange(n)[:,", "np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning')", "typically in congruence up to # the smallest alpha reached", "'Least Angle Regression' # by Efron et al 2004. The", "original units, that is, they are rescaled back, whereas sklearn", "def test_singular_matrix(): # Test when input is a singular matrix", "np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle of", "coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in", "X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X ==", "method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def", "-1]) - y assert_less((residual ** 2).sum(), 1.) 
# just make", "enumerate(coef_path_.T): res = y - np.dot(X, coef_) cov = np.dot(X.T,", "linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G,", "solver # Also test that lasso_path (using lars_path output style)", "the right answer G = np.dot(X.T, X) Xy = np.dot(X.T,", "linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_)", "Test that lars_path with precomputed Gram and Xy gives the", "[True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X,", "method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True)", "(lars library) under the following # scenarios: # 1) fit_intercept=False", "i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X, coef_)", "linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 #", "diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples", "conditions. 
rng = np.random.RandomState(42) # Generate data n, m =", "lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _", "square solution at the end # of the path X1", "3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy =", "test, with normalization for alpha_min in alphas_min: alphas, _, lasso_path", "from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing", "= linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y,", "user input regarding copy_X is not being overridden (it was", "_, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)", "assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation", "-1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that", "= 'lasso' _, _, coefs = \\ linear_model.lars_path(X, y, return_path=True,", "y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar", "linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred", "and # normalize=False ########################################################################### # # The R result was", "Lasso y = [5, 0, 5] for X in (", "lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100): X", "input X = np.array([[3., 3., 1.], [2., 2., 0.], [1.,", "* 3.) 
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2 +", "we need to do this step before comparing # their", "0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_)", "20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8,", "clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ -", "test verbose output from io import StringIO import sys old_stdout", "-7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 =", "estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True,", "ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto', None]: clf", "@pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def test_estimatorclasses_positive_constraint():", "-83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0,", "y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso')", "`iid`') # 0.22 def test_lars_path_positive_constraint(): # this is the main", "and Xy remains # correct X, y = 3 *", "0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557,", "and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path,", "0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0,", "assert len(w) == 0 def test_lasso_lars_ic(): # Test the LassoLarsIC", "linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in zip(output,", "vs sklearn when fit_intercept=True and # normalize=True # # Note:", "# normalize=False 
########################################################################### # # The R result was obtained", "However for the middle part, the comparison of coefficient values", "fit_intercept=True and # normalize=True # # Note: When normalize is", "+ .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False)", "def test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that", "alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso', Gram=G,", "decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R vs", "return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed():", "# same test, with normalization for alpha_min in alphas_min: alphas,", "using coordinate descent give the # same results. X =", "0]]) def test_rank_deficient_design(): # consistency test that checks that LARS", "input data (with n_features < rank) in the same way", "therefore, we need to do this step before comparing #", "y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): #", "Lars going down to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7)", "Lars Lasso gives least square solution at the end #", "had to make an adaptations. See below. # not normalized", "def test_multitarget(): # Assure that estimators receiving multidimensional y do", "estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value", "[0, 0, 1]] ): # To be able to use", "previous lasso output style # under these conditions. rng =", "= linear_model.LassoLars(alpha=0.) 
clf.fit(X1, y) # Avoid FutureWarning about default value", "linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # normalized data X =", "__init__ value \"\"\" lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X", "overridden (it was until at least version 0.21) \"\"\" lasso_lars", "y) n_samples = y.size def test_simple(): # Principle of Lars", "diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad", "# Note it used to be the case that Lars", "used to be the case that Lars had to use", "`rcond` parameter will change') # numpy deprecation def test_lars_lstsq(): #", "# Also test that lasso_path (using lars_path output style) gives", "_, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False,", "0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0,", "0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario", "and check that # it does not blow up, and", "# Principle of Lars is to keep covariances tied and", "y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_ in", "1]] y = [10, 10, 1] alpha = .0001 def", "include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c", "linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha =", "default value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV,", "LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0]", "in the middle, and in the last part of the", "(rng.rand(k, 1) + 1) y = np.dot(X, w) sigma =", "with normalized data X = diabetes.data alphas, _, lasso_path =", "= np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) 
assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test", "0, 5] for X in ( [[5, 0], [0, 5],", "in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_)", "{'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname in", "unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def", "[0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0],", "ensure that we get negative coefficients when positive=False # and", "lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X,", "start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha", "= np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k,", "parameter on the lars_path method # the estimator classes just", "warnings from distutils.version import LooseVersion import numpy as np import", "library) under the following # scenarios: # 1) fit_intercept=False and", "assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown IC lars_broken =", "# Test that sklearn LassoLars implementation agrees with the LassoLars", "numpy as np import pytest from scipy import linalg from", "the smallest alpha reached by the Lars-Lasso algorithm and start", "test_simple(): # Principle of Lars is to keep covariances tied", "assure that at least some features get added if necessary", "linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the", "X = x.T ########################################################################### # Scenario 1: Let's compare R", "{'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default", "+ positive option is done use these: # assert_raises(ValueError, linear_model.lars_path,", "method=method, 
positive=False) assert coefs.min() < 0 _, _, coefs =", "linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X,", "= np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def", "test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path(", "rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X,", "= np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.)", "X, y = 3 * diabetes.data, diabetes.target G = np.dot(X.T,", "deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least", "[1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path =", "following should not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test,", "# no more than max_pred variables can go into the", "0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067,", "[1., 1., 0]]) y = np.array([1., 0., 0]) rng =", "should be of length 6 + 1 in a Lars", "0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's compare", "positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a", "def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user input regarding copy_X is", "# library(lars) # model_lasso_lars = lars(X, t(y), type=\"lasso\", intercept=FALSE, #", "(1. / (2. 
* len(X)) * linalg.norm(y - np.dot(X, coef))", "to compute the objective function, # we need to turn", "): # To be able to use the coefs to", "= linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_", "= np.random.RandomState(42) # Generate data n, m = 70, 100", "cd_obj * (1. + 1e-8)) def test_lars_add_features(): # assure that", "- np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov))", "y, return_path=True, method=method, positive=True) assert coefs.min() >= 0 # now", "un-normalized dataset clf = linear_model.LassoLars(alpha=0.) clf.fit(X1, y) # Avoid FutureWarning", "linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a", "x] # add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y)", "y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the coefficients", "1]] ): # To be able to use the coefs", "continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c -", "assert_less(err, 1e-3) # normalized data X = diabetes.data alphas, _,", "= diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True)", "estimator.fit(X, y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test", "- lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test that the", "[-0.01460346, -0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]])", "sklearn when fit_intercept=True and # normalize=True # # Note: When", "algorithm does not converge to # the least-squares-solution for small", "import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises", "keep covariances tied and decreasing # also test verbose output", "bounded n_samples = 10 X = rng.rand(n_samples, 5) y =", "= estimator.predict(X) alphas, 
active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_,", "linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]):", "this but this is no longer the case with the", "= clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test", "for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map =", "available in R (lars library) under the following # scenarios:", "_, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False,", "https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train,", "the note above) temp = X - np.mean(X, axis=0) normx", "alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd =", "method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1]", "data (with n_features < rank) in the same way #", "no X and Gram raises exception Xy = np.dot(X.T, y)", "When using automated memory mapping on large input, the #", "normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a", "we had to make an adaptations. See below. # not", "# Also check that the sequence of alphas is always", "similar test, with the classifiers for alpha in np.linspace(1e-2, 1", "= np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X,", "y).coef_ obj_lars = (1. / (2. * 3.) 
* linalg.norm(y", "this same function here default_parameter = {'fit_intercept': False} estimator_parameter_map =", "0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936], [0,", "that the ``return_path=False`` option with Gram and Xy remains #", "lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y)", "path _, _, coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq =", "1.], [2., 2., 0.], [1., 1., 0]]) y = np.array([1.,", "y, Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res =", "coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test", "set assert ocur == X.shape[1] finally: sys.stdout = old_stdout def", "that lars_path is robust to collinearity in input X =", "1e-3) # normalized data X = diabetes.data alphas, _, lasso_path", "< rank) in the same way # as coordinate descent", "rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation", "# correct X, y = 3 * diabetes.data, diabetes.target G", "the # same results when early stopping is used. 
#", "that lars_path with no X and Gram raises exception Xy", "lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True)", "lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars", "not do that, therefore, we need to do this step", "y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_", "\"\"\" Test that user input to .fit for copy_X overrides", "-1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y)", "of the above with additional positive # option. However for", "train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from", "returns the correct output alphas_, _, coef_path_ = linear_model.lars_path( X,", "= None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq =", "positive when positive=True # for method 'lar' (default) and lasso", "# Create an ill-conditioned situation in which the LARS has", "- lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen", "linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj", "assert_less(error, 0.01) # similar test, with the classifiers for alpha", "= rng.normal(0, 1, (100, 5)) X_copy = X.copy() y =", "-3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0,", "tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. /", "Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test for", "is used. 
# (test : before, in the middle, and", "# by Efron et al 2004. The coefficients are typically", "is equal to True, R returns the coefficients in #", "assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method,", "y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_))", "IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data():", "rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x] #", "without the positive option. This is due # to the", "- lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization for", "2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710,", "X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that", "y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert", "diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd =", "linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a", "= linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8)", "1 in a Lars going down to 6 # non-zero", "= lars_cv.alpha_ assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with", "trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r = np.array([[0,", "clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8)", 
"object by checking that # - some good features are", "model_lasso_lars = lars(X, t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE) #", "When normalize is equal to True, R returns the coefficients", "test that checks that LARS Lasso is handling rank #", ">= 1.14 rcond = None if LooseVersion(np.__version__) >= '1.14' else", "import assert_greater from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings", "= objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X,", "test_simple_precomputed(): # The same, with precomputed Gram matrix _, _,", "= y - np.dot(X, coef_) cov = np.dot(X.T, res) C", "0], [1, 1, 1]] y = [10, 10, 1] alpha", "import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal", "data X = 3 * diabetes.data alphas, _, lasso_path =", "alphas, see 'Least Angle Regression' # by Efron et al", "-48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas =", "assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): #", "# consistency test that checks that LARS Lasso is handling", "classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False,", "estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert min(estimator.coef_) >=", "# Test that the ``return_path=False`` option returns the correct output", "method # the estimator classes just make use of this", "Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y,", "of coefficient values # for a range of alphas, we", "0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X = x.T 
###########################################################################", "0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614,", "to go # far in the path to converge, and", "does not do that, therefore, we need to do this", "y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def", "that Lars gives least square solution at the end #", "= linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef,", "error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): #", "given steps chosen. old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for", "diabetes.data.shape[1]) # test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>')", "# the least-squares-solution for small alphas, see 'Least Angle Regression'", "ocur == i + 1 else: # no more than", "0., 0]) rng = np.random.RandomState(0) f = ignore_warnings _, _,", "= linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1.", "# add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert", "the estimator classes just make use of this function #", "longer the case with the # equality_tolerance checks X =", "we gonna test the positive option for all estimator classes", "Test that sklearn LassoLars implementation agrees with the LassoLars #", "# Test lasso lars on a very ill-conditioned design, and", "= lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.)", "normalize=False # 2) fit_intercept=True and normalize=True # Let's generate the", "with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X,", "linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars = (1. /", "used. 
# (test : before, in the middle, and in", "0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): # Check", "does not converge to # the least-squares-solution for small alphas,", "@pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user input", "[0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795],", "alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_)", "coef_path_ = linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq,", "Xy = np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy)", "{}} @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 def", "assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X,", "Also test that lasso_path (using lars_path output style) gives #", "alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with", "- lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars", "method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar',", "= old_stdout def test_simple_precomputed(): # The same, with precomputed Gram", "small alphas, see 'Least Angle Regression' # by Efron et", "lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) #", "= {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {},", "rank) in the same way # as coordinate descent Lasso", "t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE) 
# r = t(model_lasso_lars$beta)", "range of alphas chosen for coefficient comparison here is restricted", "the objective function, # we need to turn off normalization", "method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in", "model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ###########################################################################", "the ``return_path=False`` option returns the correct output alphas_, _, coef_path_", "solution at the end # of the path _, _,", "classes just make use of this function # we do", "error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test,", "clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when", "-19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863,", "= model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2:", "linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err,", "0], [-1e-32, 0, 0], [1, 1, 1]] y = [10,", "** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(),", "np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(), 1.) #", "the Lars-Lasso algorithm does not converge to # the least-squares-solution", "< abs(cov)]) if i < X.shape[1]: assert ocur == i", "they are rescaled back, whereas sklearn # does not do", "= diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert", "some good features are selected. 
# - alpha_bic > alpha_aic", "that # - some good features are selected. # -", "= np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y =", "w) sigma = 0.2 y += sigma * rng.rand(*y.shape) y", "got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will", "Lars is to keep covariances tied and decreasing # also", "= np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X, y,", "correct output alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar')", "from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC", "y = diabetes.data, diabetes.target G = np.dot(X.T, X) Xy =", "y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping():", "X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1,", "(400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X,", "# diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1,", "lasso2.alphas_) # Also check that the sequence of alphas is", "= {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The", "on large input, the # fold data is in read-only", "about default value change when numpy >= 1.14 rcond =", "assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from", "X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef =", "y = np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto',", "the following code: # # library(lars) # model_lasso_lars = lars(X,", "(read the note above) temp = X - np.mean(X, axis=0)", "1.], [1., 1.]]) y1 = np.array([1, 1]) _, _, coef_path", "below. 
# not normalized data X = 3 * diabetes.data", "= classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def", "equal to True, R returns the coefficients in # their", "params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min()", "1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0],", "case with the # equality_tolerance checks X = [[1e20, 1e20,", "y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same", "for copy_X overrides default __init__ value \"\"\" lasso_lars = LassoLarsIC(precompute=False)", "LAR + positive option is done use these: # assert_raises(ValueError,", "with the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2,", "with the # equality_tolerance checks X = [[1e20, 1e20, 0],", "alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd =", "another dataset that has multiple drops diabetes = datasets.load_diabetes() X,", "descent Lasso y = [5, 0, 5] for X in", "estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator =", "test_lars_path_readonly_data(): # When using automated memory mapping on large input,", "alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22 @pytest.mark.parametrize(", "# To be able to use the coefs to compute", "normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:,", "also test verbose output from io import StringIO import sys", "in the last part of the path) alphas_min = [10,", "method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_", "-83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0,", "= 
linear_model.lars_path( X, y, method='lar') alpha_, _, coef = linear_model.lars_path(", "as coordinate descent Lasso y = [5, 0, 5] for", "y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X ==", "coef_path_[:, -1]) def test_collinearity(): # Check that lars_path is robust", "tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err,", "that has multiple drops diabetes = datasets.load_diabetes() X, y =", "# for a range of alphas, we had to make", "10, 1] alpha = .0001 def objective_function(coef): return (1. /", ".fit for copy_X overrides default __init__ value \"\"\" lasso_lars =", "2) fit_intercept=True and normalize=True # Let's generate the data used", "C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C -", "_, coef_path_ = linear_model.lars_path( X, y, method='lar') alpha_, _, coef", "def test_lars_path_positive_constraint(): # this is the main test for the", "clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in", "y, alpha_min=0.01) assert not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1])", "in the same way # as coordinate descent Lasso y", "< 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a", "**params) estimator.fit(X, y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model,", "= np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change')", "et al 2004. 
The coefficients are typically in congruence up", "is a singular matrix X1 = np.array([[1, 1.], [1., 1.]])", "of the `iid`') # 0.22 def test_lars_path_positive_constraint(): # this is", "coefficient comparison here is restricted # as compared with the", "\"\"\" lasso_lars = LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0,", "X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1])", "X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ ==", "] for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X)", "linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err = linalg.norm(clf1.coef_ -", "k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k],", "by the Lars-Lasso algorithm and start to # diverge thereafter.", "# r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829,", "# same test, with normalized data X = diabetes.data alphas,", "return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _, coefs", "and check that LARS and coordinate # descent give the", "-1]) def test_collinearity(): # Check that lars_path is robust to", "method='lar', positive=True) method = 'lasso' _, _, coefs = \\", "assert_array_almost_equal(active[k], estimator.active_) assert_array_almost_equal(coef[k], estimator.coef_) assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore:", "error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X,", "# property of the given dataset, with the given steps", "test the positive option for all estimator classes default_parameter =", "output from io import StringIO import sys old_stdout = sys.stdout", "method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = 
linear_model.lars_path( X,", "that checks that LARS Lasso is handling rank # deficient", "Assure that estimators receiving multidimensional y do the right thing", "* diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True)", "clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False,", "5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X, y)", "objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features(): #", "trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0,", "to keep covariances tied and decreasing # also test verbose", "Note it used to be the case that Lars had", "y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path", "linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas", "LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100,", "of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility", "= y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y, method='lasso') _,", "vs sklearn when fit_intercept=False and # normalize=False ########################################################################### # #", "Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path( X, y, method='lasso',", "linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with normalized", "= diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add 5", "None if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1,", "10, 0], [1e-32, 0, 0], [0, 0, 1]] ): #", "lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on unknown", "of alphas, we had to make an 
adaptations. See below.", "np.random.RandomState(0) f = ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y,", "for estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator =", "lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): #", "[0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 = linear_model.LassoLars(alpha=0, fit_intercept=True, normalize=True)", "== alphas_[-1] def test_no_path_all_precomputed(): # Test that the ``return_path=False`` option", "linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use", "1.) # just make sure it's bounded n_samples = 10", "y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f =", "y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path with", "lars(X, t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 =", "y = X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X,", "* (1. + 1e-8)) def test_lars_add_features(): # assure that at", "Check for different values of precompute G = np.dot(X.T, X)", "that LARS and coordinate # descent give the same answers", "the drop for good # strategy for this but this", "coefs.min() >= 0 # now we gonna test the positive", "given # by the coordinate descent solver # Also test", "2004. 
The coefficients are typically in congruence up to #", "assert_less(err, 1e-3) # same test, with normalized data X =", "overrides default __init__ value \"\"\" lasso_lars = LassoLarsIC(precompute=False) rng =", "lars_path method # the estimator classes just make use of", "'auto', None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1,", "Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1]", "diabetes.data X = np.c_[X, x, x] # add correlated features", "correct X, y = 3 * diabetes.data, diabetes.target G =", "Check that lars_path is robust to collinearity in input X", ": before, in the middle, and in the last part", "= ignore_warnings _, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert", "y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): #", "from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing", "assert ocur == i + 1 else: # no more", "coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution():", "estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model,", "X, y, method='lar', verbose=10) sys.stdout = old_stdout for i, coef_", "n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression", "the above with additional positive # option. 
However for the", "1) fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True #", "axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X',", "with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method =", "train_test_split(X, y, random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test):", "above with additional positive # option. However for the middle", "coef_path_ = linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_,", "checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0], [1,", "copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path():", "lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and", "y.size def test_simple(): # Principle of Lars is to keep", "increases. 
# This property is not actually guaranteed in general", "res = y - np.dot(X, coef_) cov = np.dot(X.T, res)", "that the optimal alpha # increases as the number of", "lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data X", "import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning", "import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO:", "right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X,", "lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in", "default value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing", "linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X = diabetes.data", "diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd", "rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k,", "0.9, 1e-4] for alpha_min in alphas_min: alphas, _, lasso_path =", "6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): #", "no more than max_pred variables can go into the active", "matrix n = 5 H = 1. 
/ (np.arange(1, n", "from sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing", "y, method='lar', Gram=G) alpha_, _, coef = linear_model.lars_path( X, y,", "rank # deficient input data (with n_features < rank) in", "back the coefficients returned by sklearn before comparing # against", "y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _,", "= [10, 10, 1] alpha = .0001 def objective_function(coef): return", "= \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() <", "# 0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the", "# Scenario 1: Let's compare R vs sklearn when fit_intercept=False", "linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas,", "= (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X,", "Let's compare R vs sklearn when fit_intercept=True and # normalize=True", "(2. 
* len(X)) * linalg.norm(y - np.dot(X, coef)) ** 2", "lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars =", "= linear_model.lars_path( X, y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _,", "restricted # as compared with the above test without the", "# to the circumstance that the Lars-Lasso algorithm does not", "just make sure it's bounded n_samples = 10 X =", "# now we gonna test the positive option for all", "coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1])", "descent solver # Also test that lasso_path (using lars_path output", "Also check that the sequence of alphas is always decreasing", "test on the diabetes dataset # ensure that we get", "{}, 'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy()", "= np.array([1., 0., 0]) rng = np.random.RandomState(0) f = ignore_warnings", "equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0, 0],", "normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err =", "# Test that the ``return_path=False`` option with Gram remains correct", "covariances tied and decreasing # also test verbose output from", "whereas sklearn # does not do that, therefore, we need", "assert_array_almost_equal(path[k], estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of", "give the # same results when using the positive option", "linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method)", "coef_path_ = linear_model.lars_path( X, y, method='lar', verbose=10) sys.stdout = old_stdout", "estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() <", "_, lars_coef = linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ =", "normalize=TRUE) # 
r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0,", "# use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) clf.fit(X1, y) #", "Lars-Lasso algorithm and start to # diverge thereafter. See #", "_, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True,", "import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less", "the diabetes dataset # ensure that we get negative coefficients", "# for method 'lar' (default) and lasso # Once deprecation", "y, method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ ==", "test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square solution", "Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path))", "in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False,", "-1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of", "copy_X is not being overridden (it was until at least", "alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default value of cv') #", "check that the sequence of alphas is always decreasing assert", "to make an adaptations. See below. 
# not normalized data", "least some features get added if necessary # test for", "be of length 6 + 1 in a Lars going", "= X.copy() y = X[:, 2] lasso_lars.fit(X, y) assert copy_X", "values # for a range of alphas, we had to", "normalize is equal to True, R returns the coefficients in", "the same result as lars_path and previous lasso output style", "but this is no longer the case with the #", "= getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert estimator.coef_.min() < 0", "The path should be of length 6 + 1 in", "linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError, lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using", "_assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y,", "+ 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n))", "0.01) # same test, with normalization for alpha_min in alphas_min:", "########################################################################### # Scenario 1: Let's compare R vs sklearn when", "-84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0,", "result (read the note above) temp = X - np.mean(X,", "y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _, _,", "test_x_none_gram_none_raises_value_error(): # Test that lars_path with no X and Gram", "default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X, y) assert", "normx = np.sqrt(np.sum(temp ** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis]", "# Generate data n, m = 70, 100 k =", "_ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1)", "alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) 
lasso_cd.alpha = alphas[-1] lasso_cd.fit(X,", "def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6)", "= linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500)", "set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2))", "n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42)", "= np.zeros(n_samples) _, _, coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False,", "= LassoLarsIC(precompute=False) rng = np.random.RandomState(0) X = rng.normal(0, 1, (100,", "sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing import", "# 0.22 def test_lars_cv(): # Test the LassoLarsCV object by", "LARS and coordinate # descent give the same answers #", "X and Gram raises exception Xy = np.dot(X.T, y) assert_raises(ValueError,", "for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1 =", "-83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266, 0, 0, 0,", "tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3)", "tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj,", "rng = np.random.RandomState(42) # Generate data n, m = 70,", "old_stdout = sys.stdout try: sys.stdout = StringIO() _, _, coef_path_", "= linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True)", "skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### 
########################################################################### # Scenario", "when positive=False # and all positive when positive=True # for", "in np.linspace(1e-2, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X,", "the given steps chosen. old_alpha = 0 lars_cv = linear_model.LassoLarsCV()", "@pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result(", "Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got)", ">= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_,", "-8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0],", "None]: clf = classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2,", "* linalg.norm(y - np.dot(X, coef_lars_)) ** 2 + .1 *", "and coordinate # descent give the same answers # Note", "for method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method)", "n, m = 70, 100 k = 5 X =", "# # library(lars) # model_lasso_lars2 = lars(X, t(y), type=\"lasso\", intercept=TRUE,", "assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X):", "n_features < rank) in the same way # as coordinate", "middle part, the comparison of coefficient values # for a", "2: Let's compare R vs sklearn when fit_intercept=True and #", "when fit_intercept=False and # normalize=False ########################################################################### # # The R", "0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0,", "# - some good features are selected. 
# - alpha_bic", "lasso_path = linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8,", "coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd", "least-squares-solution for small alphas, see 'Least Angle Regression' # by", "0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0,", "model_lasso_lars2 = lars(X, t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE) #", "X.shape[1]: assert ocur == i + 1 else: # no", "units, that is, they are rescaled back, whereas sklearn #", "(using lars_path output style) gives # the same result as", "lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_", "alpha = .0001 def objective_function(coef): return (1. / (2. *", "@pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): #", "coefficient values # for a range of alphas, we had", "due # to the circumstance that the Lars-Lasso algorithm does", "# non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure", "= diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T,", "and in the last part of the path) alphas_min =", "style) gives # the same result as lars_path and previous", "np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0,", "add correlated features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w)", "``return_path=False`` option returns the correct output alphas_, _, coef_path_ =", "when positive=True # for method 'lar' (default) and lasso #", "skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" 
Test", "linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas): if", "at least version 0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng", "1.]]) y1 = np.array([1, 1]) _, _, coef_path = linear_model.lars_path(X1,", "= ((1. / (2. * 3.)) * linalg.norm(y - np.dot(X,", "0.01) # similar test, with the classifiers for alpha in", "assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import assert_raises from", "2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637,", "back, whereas sklearn # does not do that, therefore, we", "# descent give the same answers # Note it used", "model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's", "* X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) 
clf.fit(X1,", "< X.shape[1]: assert ocur == i + 1 else: #", "= np.array([[1, 1.], [1., 1.]]) y1 = np.array([1, 1]) _,", "mode # This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597", "lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01)", "not fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore:", "LassoLarsCV object by checking that the optimal alpha # increases", "# r = t(model_lasso_lars$beta) # r = np.array([[0, 0, 0,", "(np.arange(1, n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit(", ".1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, tol=1e-6, normalize=False) coef_cd_", "deprecation of LAR + positive option is done use these:", "lasso_path = linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8)", "output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): #", "lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for c, a in zip(lasso_path.T, alphas):", "2., 0.], [1., 1., 0]]) y = np.array([1., 0., 0])", "more than max_pred variables can go into the active set", "least square solution at the end # of the path", "in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre =", "with normalization for alpha_min in alphas_min: alphas, _, lasso_path =", "-79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0, 0, 0, -0.476624256777266,", "the middle, and in the last part of the path)", "# Test that the ``return_path=False`` option with Gram and Xy", "test_lars_lstsq(): # Test that Lars gives least square solution at", "0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using", "def 
test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned", "0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars", "def test_collinearity(): # Check that lars_path is robust to collinearity", "congruence up to # the smallest alpha reached by the", "coordinate descent give the # same results when using the", "blow up, and stays somewhat close to a solution given", "@pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user input", "for small alphas, see 'Least Angle Regression' # by Efron", "parameter will change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test", "as w: rng = np.random.RandomState(42) x = rng.randn(len(y)) X =", "the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1, output2):", "Xy = np.dot(X.T, y) alphas_, _, coef_path_ = linear_model.lars_path( X,", "0.01) def test_lasso_lars_path_length(): # Test that the path length of", "at the end # of the path _, _, coef_path_", "fail despite copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The", "for alpha in np.linspace(6e-1, 1 - 1e-2, 20): clf1 =", "lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X = rng.normal(0,", "= linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic():", "# TODO: use another dataset that has multiple drops diabetes", "end # of the path _, _, coef_path_ = linear_model.lars_path(X,", "Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def", "classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20): clf1", "assert_greater from sklearn.utils.testing import 
assert_raises from sklearn.utils.testing import ignore_warnings from", "here default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1},", "Test when input is a singular matrix X1 = np.array([[1,", "with the above test without the positive option. This is", "diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1", "np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026],", "alpha_min=0., method='lasso', verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test", "-3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0,", "- n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic')", "lars_path output style) gives # the same result as lars_path", "[[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]] ):", "np.array([[3., 3., 1.], [2., 2., 0.], [1., 1., 0]]) y", "sklearn.utils.testing import assert_less from sklearn.utils.testing import assert_greater from sklearn.utils.testing import", "basically a copy of the above with additional positive #", "intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) # r", "had to use the drop for good # strategy for", "scenarios: # 1) fit_intercept=False and normalize=False # 2) fit_intercept=True and", "assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The default", "lasso_cd.coef_) assert_less(error, 0.01) # similar test, with the classifiers for", "that lars_path with precomputed Gram and Xy gives the right", "estimators receiving multidimensional y do the right thing Y =", "with additional positive # option. 
However for the middle part,", "assert coefs.min() < 0 _, _, coefs = \\ linear_model.lars_path(X,", "np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') #", "coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min()", "against the R result (read the note above) temp =", "# r2 = t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0,", "TODO: use another dataset that has multiple drops diabetes =", "not np.isnan(coef_path_).any() residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual", "fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned", "sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res =", "in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change')", "objective_function(coef): return (1. / (2. * len(X)) * linalg.norm(y -", "not being overridden (it was until at least version 0.21)", "do this step before comparing # their results. 
########################################################################### #", "0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X", "# model_lasso_lars2 = lars(X, t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE)", "n + 1) + np.arange(n)[:, np.newaxis]) clf = linear_model.Lars(fit_intercept=False).fit( H,", "pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso'", "test_lars_path_positive_constraint(): # this is the main test for the positive", "coefficients returned by sklearn before comparing # against the R", "coef = linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False)", "the LassoLarsIC object by checking that # - some good", "dataset clf = linear_model.LassoLars(alpha=0.) clf.fit(X1, y) # Avoid FutureWarning about", "lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) for", "np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check that", "the circumstance that the Lars-Lasso algorithm does not converge to", "the coordinate descent solver # Also test that lasso_path (using", "in # their original units, that is, they are rescaled", "np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error", "assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option", "ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import _lars_path_residues,", "old_stdout def test_simple_precomputed(): # The same, with precomputed Gram matrix", "coef_lstsq = np.linalg.lstsq(X1, y, 
rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will", "# Scenario 2: Let's compare R vs sklearn when fit_intercept=True", "y) for method in 'lar', 'lasso': output = linear_model.lars_path(X, y,", "using the positive option # This test is basically a", "= alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_)", "in which the LARS has to go # far in", "from sklearn.utils.testing import assert_raises from sklearn.utils.testing import ignore_warnings from sklearn.utils.testing", "-1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that", "the least-squares-solution for small alphas, see 'Least Angle Regression' #", "main test for the positive parameter on the lars_path method", "be the case that Lars had to use the drop", "range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k],", "\\ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0", "in the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396,", "positive # option. 
However for the middle part, the comparison", "# fold data is in read-only mode # This is", "agrees with the LassoLars # implementation available in R (lars", "return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed():", "0 lars_cv = linear_model.LassoLarsCV() for length in (400, 200, 100):", "@pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng = np.random.RandomState(42)", "up to # the smallest alpha reached by the Lars-Lasso", "testing the transmissibility for the positive option of all estimator", "method in 'lar', 'lasso': output = linear_model.lars_path(X, y, method=method) output_pre", "go # far in the path to converge, and check", "middle, and in the last part of the path) alphas_min", "from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from", "return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): #", "10]], [[10, 10, 0], [1e-32, 0, 0], [0, 0, 1]]", "lasso output style # under these conditions. 
rng = np.random.RandomState(42)", "y) def test_lars_path_readonly_data(): # When using automated memory mapping on", "# diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True,", "# as compared with the above test without the positive", "descent give the # same results when using the positive", "False]) def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method,", "Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y,", "lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def", "and lasso # Once deprecation of LAR + positive option", "(with n_features < rank) in the same way # as", "Let's rescale back the coefficients returned by sklearn before comparing", "to turn off normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ =", "lasso lars on a very ill-conditioned design, and check that", "This is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data =", "in congruence up to # the smallest alpha reached by", "on the lars_path method # the estimator classes just make", "y, method='lasso', Xy=Xy, Gram=G, alpha_min=0.9) alpha_, _, coef = linear_model.lars_path(", "_, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef, coef_path_[:,", "in ( [[5, 0], [0, 5], [10, 10]], [[10, 10,", "method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error():", "# does not do that, therefore, we need to do", "linear_model.lars_path(X, y, method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True)", "np.dot(X.T, X) Xy = 
np.dot(X.T, y) n_samples = y.size def", "import pytest from scipy import linalg from sklearn.model_selection import train_test_split", "G = np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X,", "Let's compare R vs sklearn when fit_intercept=False and # normalize=False", "_, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _,", "lars_path is robust to collinearity in input X = np.array([[3.,", "= np.dot(X.T, X) Xy = np.dot(X.T, y) n_samples = y.size", "= linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T, alphas):", "from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions", "X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)] # add", "above) temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp", "[[5, 0], [0, 5], [10, 10]], [[10, 10, 0], [1e-32,", "matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar')", "give the # same results. 
X = 3 * diabetes.data", "values of precompute G = np.dot(X.T, X) clf = classifier(precompute=G)", "0, 0], [1, 1, 1]] y = [10, 10, 1]", "= diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha", "np.array([[0.47299829, 0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0],", "and all positive when positive=True # for method 'lar' (default)", "using coordinate descent give the # same results when early", "skl_betas, decimal=12) ########################################################################### ########################################################################### # Scenario 2: Let's compare R", "# This property is not actually guaranteed in general and", "linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length(): # Test", "== 0 def test_lasso_lars_ic(): # Test the LassoLarsIC object by", "100): X = diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha,", "sklearn before comparing # against the R result (read the", "positive=True) method = 'lasso' _, _, coefs = \\ linear_model.lars_path(X,", "deprecation def test_lars_lstsq(): # Test that Lars gives least square", "cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj *", "actually guaranteed in general and is just a # property", "use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True)", "residual = np.dot(X, coef_path_[:, -1]) - y assert_less((residual ** 2).sum(),", "all estimator # classes in this same function here default_parameter", "the end # of the path _, _, coef_path_ =", "Test that the ``return_path=False`` option with Gram remains correct alphas_,", "of length 6 + 1 in a Lars going down", "# # library(lars) # model_lasso_lars = lars(X, t(y), type=\"lasso\", intercept=FALSE,", "G = 
np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_, _,", "the end # of the path X1 = 3 *", "# 2) fit_intercept=True and normalize=True # Let's generate the data", "0], [0, 0, 1]] ): # To be able to", "0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0,", "alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G, return_path=False)", "is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2])", "normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj =", "= model_lasso_lars2.coef_path_ # Let's rescale back the coefficients returned by", "compared with the above test without the positive option. This", "coefficients are typically in congruence up to # the smallest", "tol=1e-8) for c, a in zip(lasso_path.T, alphas): if a ==", "lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check", "option # This test is basically a copy of the", "Note: When normalize is equal to True, R returns the", "the main test for the positive parameter on the lars_path", "option with Gram remains correct alphas_, _, coef_path_ = linear_model.lars_path(", "assert_less(error, 0.01) # same test, with normalization for alpha_min in", "positive option # This test is basically a copy of", "= linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha,", "[0, 0, 0, 0, -0.476624256777266, 0, 0, 0, 0, 0.025219751009936],", "data n, m = 70, 100 k = 5 X", "y ** 2]).T n_targets = Y.shape[1] estimators = [ linear_model.LassoLars(),", "path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets):", "number of samples increases. 
# This property is not actually", "by the coordinate descent solver # Also test that lasso_path", "error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range", "sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing import", "= t(model_lasso_lars2$beta) r2 = np.array([[0, 0, 0, 0, 0], [0,", "def test_rank_deficient_design(): # consistency test that checks that LARS Lasso", "for X in ( [[5, 0], [0, 5], [10, 10]],", "lasso_cd = linear_model.Lasso(fit_intercept=True, normalize=True, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y)", "regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in", "test_no_path_all_precomputed(): # Test that the ``return_path=False`` option with Gram and", "alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic", "X1 = 3 * X # use un-normalized dataset clf", "lars_broken.fit, X, y) def test_lars_path_readonly_data(): # When using automated memory", "# Let's generate the data used in the bug report", "res) C = np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C", "decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which", "path) alphas_min = [10, 0.9, 1e-4] for alpha_min in alphas_min:", "that user input to .fit for copy_X overrides default __init__", "change') # numpy deprecation def test_lars_lstsq(): # Test that Lars", "_, _, coef_path_ = f(linear_model.lars_path)(X, y, alpha_min=0.01) assert not np.isnan(coef_path_).any()", "\"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng = np.random.RandomState(0) X =", "rng = np.random.RandomState(0) X = rng.normal(0, 1, (100, 5)) X_copy", "method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected,", "same test, with normalized data X = diabetes.data alphas, _,", "estimator.coef_, 
estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred", "lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng = np.random.RandomState(42) X", "test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user input regarding copy_X is not", "X) Xy = np.dot(X.T, y) n_samples = y.size def test_simple():", "0]]) y = np.array([1., 0., 0]) rng = np.random.RandomState(0) f", "clf = linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars", "general and is just a # property of the given", "than max_pred variables can go into the active set assert", "just a # property of the given dataset, with the", "np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps <", "X_test, y_train, y_test): # The following should not fail despite", "# The following should not fail despite copy=False _lars_path_residues(X_train, y_train,", "linear_model.lars_path(X, y, method='lasso') _, lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas,", "as the number of samples increases. # This property is", "go into the active set assert ocur == X.shape[1] finally:", "sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_equal from sklearn.utils.testing import", "0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278,", "linalg.norm(y - np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef,", "for coefficient comparison here is restricted # as compared with", "y = np.dot(X, w) sigma = 0.2 y += sigma", "samples increases. 
# This property is not actually guaranteed in", "output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True, False, 'auto',", "is no longer the case with the # equality_tolerance checks", "comparison here is restricted # as compared with the above", "= X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp ** 2,", "when input is a singular matrix X1 = np.array([[1, 1.],", "assert not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as", "def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso using coordinate", "of the path X1 = 3 * X # use", "assert_less(error, 0.01) def test_lasso_lars_vs_lasso_cd_early_stopping(): # Test that LassoLars and Lasso", "linear_model.lars_path(X, y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1])", "variables can go into the active set assert ocur ==", "lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in", "5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0], [0,", "= 3 * diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y,", "* linalg.norm(y - np.dot(X, coef)) ** 2 + alpha *", "the path to converge, and check that LARS and coordinate", "# of the path X1 = 3 * X #", "y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) #", "Hilbert matrix n = 5 H = 1. 
/ (np.arange(1,", "returned by sklearn before comparing # against the R result", "-1] - lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization", "function, # we need to turn off normalization lars =", "lars = linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ =", "LooseVersion import numpy as np import pytest from scipy import", "gives the right answer G = np.dot(X.T, X) Xy =", "k = 5 X = rng.randn(n, m) w = np.zeros((m,", "w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp", "def test_lars_path_gram_equivalent(method, return_path): _assert_same_lars_path_result( linear_model.lars_path_gram( Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path),", "using automated memory mapping on large input, the # fold", "# 0.22 def test_lars_path_positive_constraint(): # this is the main test", "value of cv') # 0.22 def test_lars_cv(): # Test the", "Lars gives least square solution at the end # of", "different values of precompute G = np.dot(X.T, X) clf =", "= linear_model.LassoLars(alpha=alpha, normalize=False) assert_warns(ConvergenceWarning, lars.fit, X, y) lars_coef_ = lars.coef_", "old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in (400,", "when using the positive option # This test is basically", "0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934, -84.033390591756657], [0, 0,", "lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be of", "y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_,", "alpha_min in alphas_min: alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso',", "sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import", "* linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1, 
tol=1e-6, normalize=False) coef_cd_ =", "= rng.randn(len(y)) X = diabetes.data X = np.c_[X, x, x]", "_, _, coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for", "a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha =", "is not being overridden (it was until at least version", "max_pred variables can go into the active set assert ocur", "is handling rank # deficient input data (with n_features <", "= np.dot(X.T, y) n_samples = y.size def test_simple(): # Principle", "tied and decreasing # also test verbose output from io", "= classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_ for precompute in [True,", "lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ =", "Test that user input to .fit for copy_X overrides default", "additional positive # option. However for the middle part, the", "that the ``return_path=False`` option returns the correct output alphas_, _,", "= np.dot(X.T, X) clf = classifier(precompute=G) output_1 = ignore_warnings(clf.fit)(X, y).coef_", "the positive option. This is due # to the circumstance", "diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y)", "the bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822,", "(default) and lasso # Once deprecation of LAR + positive", "stopping is used. # (test : before, in the middle,", "= Y.shape[1] estimators = [ linear_model.LassoLars(), linear_model.Lars(), # regression test", "test_lars_add_features(): # assure that at least some features get added", "= (1. / (2. * 3.) * linalg.norm(y - np.dot(X,", "# their results. 
########################################################################### # # The R result was", "= linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test, with", "it used to be the case that Lars had to", "= 3 * diabetes.data, diabetes.target G = np.dot(X.T, X) Xy", "y do the right thing Y = np.vstack([y, y **", "= [10, 0.9, 1e-4] for alpha_min in alphas_min: alphas, _,", "y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency", "np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X,", "np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_copyX_behaviour(copy_X):", "the case with the # equality_tolerance checks X = [[1e20,", "X = diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso')", "# model_lasso_lars = lars(X, t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE)", "= np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829, 0,", "tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't", "test for 6d2b4c # Hilbert matrix n = 5 H", "test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator in estimators:", "temp = X - np.mean(X, axis=0) normx = np.sqrt(np.sum(temp **", "y).coef_ obj_cd = ((1. / (2. 
* 3.)) * linalg.norm(y", "# When using automated memory mapping on large input, the", "to collinearity in input X = np.array([[3., 3., 1.], [2.,", "part of the path) alphas_min = [10, 0.9, 1e-4] for", "- eps < abs(cov)]) if i < X.shape[1]: assert ocur", "property is not actually guaranteed in general and is just", "assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test", "test_multitarget(): # Assure that estimators receiving multidimensional y do the", "for precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute)", "Gram matrix _, _, coef_path_ = linear_model.lars_path( X, y, Gram=G,", "end # of the path X1 = 3 * X", "somewhat close to a solution given # by the coordinate", "coef_path_ = linear_model.lars_path( X, y, Gram=G, method='lar') for i, coef_", "clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data X", "test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user input to .fit for copy_X", "'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22", "lasso # Once deprecation of LAR + positive option is", "normalize=False ########################################################################### # # The R result was obtained using", "np.dot(X.T, y) for method in 'lar', 'lasso': output = linear_model.lars_path(X,", "classes default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1},", "using the following code: # # library(lars) # model_lasso_lars2 =", "zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') #", "diabetes.data[:length] y = diabetes.target[:length] lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha =", "that at least some features get added if necessary #", "= np.array([[0, 0, 0, 0, 0], [0, 0, 0, 8.371887668009453,", "= X[:, 2] lasso_lars.fit(X, y, 
copy_X=copy_X) assert copy_X == np.array_equal(X,", "import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle import", "for this but this is no longer the case with", "value of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC])", "assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test", "1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X,", "(test : before, in the middle, and in the last", "given dataset, with the given steps chosen. old_alpha = 0", "> alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic')", "lasso_cd.alpha = alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] -", "test without the positive option. This is due # to", "import sys old_stdout = sys.stdout try: sys.stdout = StringIO() _,", "verbose=10) sys.stdout = old_stdout for i, coef_ in enumerate(coef_path_.T): res", "linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the", "coordinate descent give the # same results when early stopping", "k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv') # 0.22", "LassoLars implementation agrees with the LassoLars # implementation available in", "positive option is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'],", "their original units, that is, they are rescaled back, whereas", "# regression test for gh-1615 linear_model.LassoLars(fit_intercept=False), linear_model.Lars(fit_intercept=False), ] for estimator", "the lars_path method # the estimator classes just make use", "# Hilbert matrix n = 5 H = 1. 
/", "- alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic lars_bic", "precomputed Gram and Xy gives the right answer G =", "be able to use the coefs to compute the objective", "classifier(precompute=precompute) output_2 = clf.fit(X, y).coef_ assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix():", "1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha,", "False]) def test_lasso_lars_copyX_behaviour(copy_X): \"\"\" Test that user input regarding copy_X", "if necessary # test for 6d2b4c # Hilbert matrix n", "it does not blow up, and stays somewhat close to", "y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1]", "rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef = linear_model.lars_path(X, y,", "0.048162321585148], [0, 0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816,", "coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_all_precomputed(): # Test", "1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso using", "parameter will change') # numpy deprecation def test_lars_lstsq(): # Test", "# Test that lars_path with precomputed Gram and Xy gives", "params = default_parameter.copy() params.update(estimator_parameter_map[estname]) estimator = getattr(linear_model, estname)(positive=False, **params) estimator.fit(X,", "return_path=True, method='lar', positive=True) method = 'lasso' _, _, coefs =", "** 2).sum(), 1.) 
# just make sure it's bounded n_samples", "= np.vstack([y, y ** 2]).T n_targets = Y.shape[1] estimators =", "def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the", "X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X',", "estimator.coef_path_) assert_array_almost_equal(Y_pred[:, k], y_pred) @pytest.mark.filterwarnings('ignore: The default value of cv')", "rng = np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0],", "-0.1015233, 0.0407278, 0.80338378, 0], [-0.69363927, 0.06754067, 0.18064514, -0.0803561, 0.40427291]]) X", "to .fit for copy_X overrides default __init__ value \"\"\" lasso_lars", "regarding copy_X is not being overridden (it was until at", "0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0,", "Create an ill-conditioned situation in which the LARS has to", "lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_)", "method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with no", "with no X and Gram raises exception Xy = np.dot(X.T,", "random_state=42) with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The", "negative coefficients when positive=False # and all positive when positive=True", "0.22 def test_lars_cv(): # Test the LassoLarsCV object by checking", ">= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso", "the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2", "= \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >=", "-45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False)", "def 
test_no_path_precomputed(): # Test that the ``return_path=False`` option with Gram", "has to go # far in the path to converge,", "method 'lar' (default) and lasso # Once deprecation of LAR", "coef_path = linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def", "# The same, with precomputed Gram matrix _, _, coef_path_", "0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas,", "def test_simple(): # Principle of Lars is to keep covariances", "always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test", "automated memory mapping on large input, the # fold data", "np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars,", "default_parameter = {'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV':", "False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user input to .fit", "assert coefs.min() >= 0 # now we gonna test the", "y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic),", "alphas[-1] lasso_cd.fit(X, y) error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error,", "method = 'lasso' _, _, coefs = \\ linear_model.lars_path(X, y,", "err = linalg.norm(clf1.coef_ - clf2.coef_) assert_less(err, 1e-3) # same test,", "to use the drop for good # strategy for this", "assert_array_almost_equal(output_1, output_2, decimal=8) def test_singular_matrix(): # Test when input is", "these conditions. rng = np.random.RandomState(42) # Generate data n, m", "Avoid FutureWarning about default value change when numpy >= 1.14", "lars_cv.fit(X, y) assert len(w) == 0 def test_lasso_lars_ic(): # Test", "that sklearn LassoLars implementation agrees with the LassoLars # implementation", "steps chosen. 
old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length", "3 * X # use un-normalized dataset clf = linear_model.LassoLars(alpha=0.)", "= np.dot(X.T, res) C = np.max(abs(cov)) eps = 1e-3 ocur", "normalize=False) coef_cd_ = coord_descent.fit(X, y).coef_ obj_cd = ((1. / (2.", "linear_model.lars_path( X, y, method='lasso', Gram=G, Xy=Xy, alpha_min=0.9, return_path=False) assert_array_almost_equal(coef, coef_path_[:,", "fit_intercept=True, normalize=True) model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale", "False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}}", "test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create an ill-conditioned situation in which the LARS", "for length in (400, 200, 100): X = diabetes.data[:length] y", "i[:k] w[supp] = np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1)", "= linear_model.lars_path( X, y, method='lar', Gram=G) alpha_, _, coef =", "LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X, y) lasso2 =", "# # Note: When normalize is equal to True, R", "linear_model.Lars(fit_intercept=False), ] for estimator in estimators: estimator.fit(X, Y) Y_pred =", ">= 0 # now we gonna test the positive option", "- np.dot(X, coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1))", "alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2, decimal=1) def test_lasso_lars_vs_lasso_cd_ill_conditioned2(): # Create", "non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget(): # Assure that", "lasso_cd.coef_) assert_less(error, 0.01) # The range of alphas chosen for", "c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0 lasso_cd.alpha", "9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312,", "transmissibility for the positive option of all estimator # classes", 
"model_lasso_lars2.fit(X, y) skl_betas2 = model_lasso_lars2.coef_path_ # Let's rescale back the", "assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'):", "same way # as coordinate descent Lasso y = [5,", "= linear_model.LassoLars() lasso.fit(X, y) lasso2 = linear_model.LassoLars(alpha=lasso.alphas_[2]) lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3],", "at the end # of the path X1 = 3", "1)) * (rng.rand(k, 1) + 1) y = np.dot(X, w)", "The R result was obtained using the following code: #", "clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X,", "R (lars library) under the following # scenarios: # 1)", "assert_equal(len(output1), len(output2)) for o1, o2 in zip(output1, output2): assert_allclose(o1, o2)", "alpha_ == alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False``", "normalize=True # Let's generate the data used in the bug", "in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2 =", "= diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd", "+ 1e-8)) def test_lasso_lars_vs_lasso_cd(): # Test that LassoLars and Lasso", "o2 in zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path',", "copy_X overrides default __init__ value \"\"\" lasso_lars = LassoLarsIC(precompute=False) rng", "y) assert estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params)", "lasso_path (using lars_path output style) gives # the same result", "here is restricted # as compared with the above test", "def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using coordinate", "X.shape[1] def _assert_same_lars_path_result(output1, output2): 
assert_equal(len(output1), len(output2)) for o1, o2 in", "FutureWarning about default value change when numpy >= 1.14 rcond", "- y assert_less((residual ** 2).sum(), 1.) # just make sure", "Xy remains # correct X, y = 3 * diabetes.data,", "sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import", "# Test that the path length of the LassoLars is", "[1, 1, 1]] y = [10, 10, 1] alpha =", "the active set assert ocur == X.shape[1] finally: sys.stdout =", "{'fit_intercept': False} estimator_parameter_map = {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC':", "coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps =", "np.arange(0, m) rng.shuffle(i) supp = i[:k] w[supp] = np.sign(rng.randn(k, 1))", "y) assert copy_X == np.array_equal(X, X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def", "estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X) assert_array_almost_equal(alphas[k], estimator.alphas_) assert_array_almost_equal(active[k], estimator.active_)", "X = diabetes.data X = np.c_[X, x, x] # add", "zip(output1, output2): assert_allclose(o1, o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False])", "coef_lars_)) ** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent =", "lasso_coef2, _ = linear_model.lasso_path(X, y, alphas=lars_alphas, tol=1e-6, fit_intercept=False) assert_array_almost_equal(lars_coef, lasso_coef2,", "multidimensional y do the right thing Y = np.vstack([y, y", "# test error on unknown IC lars_broken = linear_model.LassoLarsIC('<unknown>') assert_raises(ValueError,", "1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y) clf2", "the # equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32,", "remains correct alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar',", "algorithm and start to # diverge thereafter. 
See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff", "This property is not actually guaranteed in general and is", "copy of the above with additional positive # option. However", "def test_lasso_lars_path_length(): # Test that the path length of the", "to the circumstance that the Lars-Lasso algorithm does not converge", "method='lasso', positive=True) lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c,", "# implementation available in R (lars library) under the following", "coef_path_ = linear_model.lars_path(X, y, Gram='auto', copy_X=False, copy_Gram=False, alpha_min=0., method='lasso', verbose=0,", "X.copy() y = X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X", "option is done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], #", "diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar',", "0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216,", "x = rng.randn(len(y)) X = diabetes.data X = np.c_[X, x,", "of cv') # 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def", "stays somewhat close to a solution given # by the", "lars.fit, X, y) lars_coef_ = lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent", "= 1. 
/ (np.arange(1, n + 1) + np.arange(n)[:, np.newaxis])", "def test_lars_add_features(): # assure that at least some features get", "error = linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): #", "guaranteed in general and is just a # property of", "_, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert", "of LAR + positive option is done use these: #", "0: continue lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c", "classes in this same function here default_parameter = {'fit_intercept': False}", "= X[:, 2] lasso_lars.fit(X, y) assert copy_X == np.array_equal(X, X_copy)", "= linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8, positive=True) for c, a in zip(lasso_path.T[:-1],", "positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'], return_path=True, method='lar', positive=True) method", "linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 = linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err", "-47.914845922736234, -48.039562334265717]]) model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas", "Lars had to use the drop for good # strategy", "Test the LassoLarsCV object by checking that the optimal alpha", "to use the coefs to compute the objective function, #", "alphas[:-1]): # don't include alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y)", "* diabetes.data, diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T,", "_, coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert", "import _lars_path_residues, LassoLarsIC # TODO: use another dataset that has", "= .0001 def objective_function(coef): return (1. / (2. * len(X))", "(2. * 3.) 
* linalg.norm(y - np.dot(X, coef_lars_)) ** 2", "np.dot(X, coef)) ** 2 + alpha * linalg.norm(coef, 1)) lars", "sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets from sklearn.linear_model.least_angle", "t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0, -79.810362809499026,", "X = rng.normal(0, 1, (100, 5)) X_copy = X.copy() y", "assert_less((residual ** 2).sum(), 1.) # just make sure it's bounded", "1, 1]] y = [10, 10, 1] alpha = .0001", "test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option of", "= linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # The range of", "length of the LassoLars is right lasso = linear_model.LassoLars() lasso.fit(X,", "= np.random.RandomState(42) X = diabetes.data X = np.c_[X, rng.randn(X.shape[0], 5)]", "fit_intercept=False and normalize=False # 2) fit_intercept=True and normalize=True # Let's", "To be able to use the coefs to compute the", "0], [0.30114139, -0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 0.80338378,", "np.dot(X, w) sigma = 0.2 y += sigma * rng.rand(*y.shape)", "[0, 0, 0, 8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553],", "i < X.shape[1]: assert ocur == i + 1 else:", "lasso_cd.coef_) assert_less(error, 0.01) # same test, with normalization for alpha_min", "o2) @pytest.mark.parametrize('method', ['lar', 'lasso']) @pytest.mark.parametrize('return_path', [True, False]) def test_lars_path_gram_equivalent(method, return_path):", "a copy of the above with additional positive # option.", "< 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y) assert", "< n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic = linear_model.LassoLarsIC('aic') rng =", "precompute in [True, False, 'auto', None]: clf = classifier(precompute=precompute) output_2", "case that Lars had to use the drop for good", "= linear_model.lars_path(X, y, method='lasso') 
lasso_cd = linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for", "some features get added if necessary # test for 6d2b4c", "test for the positive parameter on the lars_path method #", "np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1])", "of the path) alphas_min = [10, 0.9, 1e-4] for alpha_min", "of alphas chosen for coefficient comparison here is restricted #", "0, 0], [0, 0, 1]] ): # To be able", "expected, got in zip(output, output_pre): assert_array_almost_equal(expected, got) @pytest.mark.filterwarnings('ignore: `rcond` parameter", "alpha_, _, coef = linear_model.lars_path( X, y, method='lar', return_path=False) assert_array_almost_equal(coef,", "gives least square solution at the end # of the", "that the ``return_path=False`` option with Gram remains correct alphas_, _,", "= linear_model.lars_path(X, y, method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha", "X_copy) @pytest.mark.parametrize('copy_X', [True, False]) def test_lasso_lars_fit_copyX_behaviour(copy_X): \"\"\" Test that user", "** 2 + .1 * linalg.norm(coef_lars_, 1)) coord_descent = linear_model.Lasso(.1,", "least version 0.21) \"\"\" lasso_lars = LassoLarsIC(copy_X=copy_X, precompute=False) rng =", "** 2, axis=0)) skl_betas2 /= normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12)", "for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred = estimator.predict(X)", "into the active set assert ocur == X.shape[1] def _assert_same_lars_path_result(output1,", "sklearn LassoLars implementation agrees with the LassoLars # implementation available", "checking that the optimal alpha # increases as the number", "same results. 
X = 3 * diabetes.data alphas, _, lasso_path", "import linalg from sklearn.model_selection import train_test_split from sklearn.utils.testing import assert_allclose", "y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366]) x = np.array([[0.47299829,", "able to use the coefs to compute the objective function,", "lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c - lasso_cd.coef_)", "= x.T ########################################################################### # Scenario 1: Let's compare R vs", "for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42) with TempMemmap(splitted_data)", "model_lasso_lars = linear_model.LassoLars(alpha=0, fit_intercept=False, normalize=False) model_lasso_lars.fit(X, y) skl_betas = model_lasso_lars.coef_path_", "is not actually guaranteed in general and is just a", "coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd", "rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y) lars_aic.fit(X,", "= objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. + 1e-8)) def test_lars_add_features():", "the above test without the positive option. This is due", "0.01) # The range of alphas chosen for coefficient comparison", "alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for", "max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False``", "np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i) supp = i[:k]", "This is due # to the circumstance that the Lars-Lasso", "function # we do the test on the diabetes dataset", "good features are selected. 
# - alpha_bic > alpha_aic #", "result as lars_path and previous lasso output style # under", "from io import StringIO import sys old_stdout = sys.stdout try:", "for c, a in zip(lasso_path.T[:-1], alphas[:-1]): # don't include alpha=0", "assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter will change') # numpy deprecation def", "last part of the path) alphas_min = [10, 0.9, 1e-4]", "following # scenarios: # 1) fit_intercept=False and normalize=False # 2)", "r = np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711,", "option for all estimator classes default_parameter = {'fit_intercept': False} estimator_parameter_map", "m) w = np.zeros((m, 1)) i = np.arange(0, m) rng.shuffle(i)", "# just make sure it's bounded n_samples = 10 X", "linear_model.lars_path(X, y, return_path=True, method=method, positive=False) assert coefs.min() < 0 _,", "and stays somewhat close to a solution given # by", "normx[:, np.newaxis] assert_array_almost_equal(r2, skl_betas2, decimal=12) ########################################################################### @pytest.mark.parametrize('copy_X', [True, False]) def", "y = 3 * diabetes.data, diabetes.target G = np.dot(X.T, X)", "an ill-conditioned situation in which the LARS has to go", "1)) assert_less(obj_lars, obj_cd * (1. + 1e-8)) def test_lasso_lars_vs_lasso_cd(): #", "def test_lasso_gives_lstsq_solution(): # Test that Lars Lasso gives least square", "as (X_train, X_test, y_train, y_test): # The following should not", ".1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. + 1e-8))", "of alphas is always decreasing assert np.all(np.diff(lasso.alphas_) < 0) def", "done use these: # assert_raises(ValueError, linear_model.lars_path, diabetes['data'], # diabetes['target'], method='lar',", "y assert_less((residual ** 2).sum(), 1.) 
# just make sure it's", "a # property of the given dataset, with the given", "when fit_intercept=True and # normalize=True # # Note: When normalize", "# against the R result (read the note above) temp", "under these conditions. rng = np.random.RandomState(42) # Generate data n,", "use un-normalized dataset clf = linear_model.LassoLars(alpha=0.) clf.fit(X1, y) # Avoid", "the path) alphas_min = [10, 0.9, 1e-4] for alpha_min in", "in (400, 200, 100): X = diabetes.data[:length] y = diabetes.target[:length]", "lars on a very ill-conditioned design, and check that #", "diabetes['target'], return_path=True, method='lar', positive=True) method = 'lasso' _, _, coefs", "'LassoLarsIC': {}} for estname in estimator_parameter_map: params = default_parameter.copy() params.update(estimator_parameter_map[estname])", "coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj", "make an adaptations. See below. # not normalized data X", "positive=False # and all positive when positive=True # for method", "in np.linspace(6e-1, 1 - 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha,", "lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0 def", "assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test", "for estimator in estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas,", "Gram=G, method=method, return_path=return_path)) def test_x_none_gram_none_raises_value_error(): # Test that lars_path with", "0.22 def test_lars_path_positive_constraint(): # this is the main test for", "coordinate descent solver # Also test that lasso_path (using lars_path", "coefs = \\ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min()", "0.22 def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive", "that the path length of the LassoLars 
is right lasso", "= {'LassoLars': {'alpha': 0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} for estname", "drop for good # strategy for this but this is", "good # strategy for this but this is no longer", "value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): # testing the", "of cv') # 0.22 def test_lars_cv(): # Test the LassoLarsCV", "0, 0, 0, 0.025219751009936], [0, -3.577397088285891, -4.702795355871871, -7.016748621359461, -7.614898471899412, -0.336938391359179,", "that we get negative coefficients when positive=False # and all", "-1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq) @pytest.mark.filterwarnings('ignore:`rcond` parameter", "- 1e-2, 20): clf1 = linear_model.LassoLars(fit_intercept=False, alpha=alpha, normalize=False, positive=True).fit(X, y)", "= linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars =", "= StringIO() _, _, coef_path_ = linear_model.lars_path( X, y, method='lar',", "y, method='lar') alpha_, _, coef = linear_model.lars_path( X, y, method='lar',", "output style) gives # the same result as lars_path and", "positive parameter on the lars_path method # the estimator classes", "t(y), type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta)", "old_stdout for i, coef_ in enumerate(coef_path_.T): res = y -", "get added if necessary # test for 6d2b4c # Hilbert", "thereafter. 
See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 -", "test_lasso_lars_vs_lasso_cd_ill_conditioned(): # Test lasso lars on a very ill-conditioned design,", "[0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745, -47.776608869312305, -47.911561610746404, -47.914845922736234, -48.039562334265717]])", "nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic))", "The default value of cv') # 0.22 def test_estimatorclasses_positive_constraint(): #", "1: Let's compare R vs sklearn when fit_intercept=False and #", "hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w: rng", "np.all(np.isfinite(clf.coef_)) def test_lars_n_nonzero_coefs(verbose=False): lars = linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]),", "use of this function # we do the test on", "in R (lars library) under the following # scenarios: #", "same test, with normalization for alpha_min in alphas_min: alphas, _,", "[0.08239882, 0.85784863, 0, 0, 0], [0.30114139, -0.07501577, 0.80895216, 0, 0],", "for good # strategy for this but this is no", "ill-conditioned situation in which the LARS has to go #", "datasets from sklearn.linear_model.least_angle import _lars_path_residues, LassoLarsIC # TODO: use another", "objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_", "Once deprecation of LAR + positive option is done use", "-7.016748621359461, -7.614898471899412, -0.336938391359179, 0, 0, 0.001213370600853, 0.048162321585148], [0, 0, 0,", "Gram and Xy gives the right answer G = np.dot(X.T,", "1) + np.arange(n)[:, np.newaxis]) clf = 
linear_model.Lars(fit_intercept=False).fit( H, np.arange(n)) assert", "diabetes['data'], # diabetes['target'], method='lar', positive=True) with pytest.warns(DeprecationWarning, match='broken'): linear_model.lars_path(diabetes['data'], diabetes['target'],", "X[:, 2] lasso_lars.fit(X, y, copy_X=copy_X) assert copy_X == np.array_equal(X, X_copy)", "np.c_[X, rng.randn(X.shape[0], 5)] # add 5 bad features lars_bic.fit(X, y)", "# Once deprecation of LAR + positive option is done", "and start to # diverge thereafter. See # https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for", "# - n_nonzero_bic < n_nonzero_aic lars_bic = linear_model.LassoLarsIC('bic') lars_aic =", "to 6 # non-zero coefs assert_equal(len(lars.alphas_), 7) @ignore_warnings def test_multitarget():", "bug report 7778 y = np.array([-6.45006793, -3.51251449, -8.52445396, 6.12277822, -19.42109366])", "``return_path=False`` option with Gram remains correct alphas_, _, coef_path_ =", "Angle Regression' # by Efron et al 2004. 
The coefficients", "matrix X1 = np.array([[1, 1.], [1., 1.]]) y1 = np.array([1,", "into the active set assert ocur == X.shape[1] finally: sys.stdout", "= lars.coef_ lars_obj = objective_function(lars_coef_) coord_descent = linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False)", "2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808, -4.021304522060710, -45.827461592423745,", "return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] @pytest.mark.filterwarnings('ignore: The", "with TempMemmap(splitted_data) as (X_train, X_test, y_train, y_test): # The following", "test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with the", "will change') # numpy deprecation def test_lars_lstsq(): # Test that", "5] for X in ( [[5, 0], [0, 5], [10,", "np.dot(X, coef_) cov = np.dot(X.T, res) C = np.max(abs(cov)) eps", "method='lasso', alpha_min=alpha_min) lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8) lasso_cd.alpha = alphas[-1] lasso_cd.fit(X,", "(2. 
* 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2", "features lars_bic.fit(X, y) lars_aic.fit(X, y) nonzero_bic = np.where(lars_bic.coef_)[0] nonzero_aic =", "= np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def test_collinearity(): # Check", "'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of cv')", "test_rank_deficient_design(): # consistency test that checks that LARS Lasso is", "distutils.version import LooseVersion import numpy as np import pytest from", "far in the path to converge, and check that LARS", "= 0.2 y += sigma * rng.rand(*y.shape) y = y.squeeze()", "type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r = t(model_lasso_lars$beta) #", "test that lasso_path (using lars_path output style) gives # the", "the positive option # This test is basically a copy", "= np.array([[0, 0, 0, 0, 0, -79.810362809499026, -83.528788732782829, -83.777653739190711, -83.784156932888934,", "= linear_model.lars_path(X1, y1) assert_array_almost_equal(coef_path.T, [[0, 0], [1, 0]]) def test_rank_deficient_design():", "Xy=Xy, Gram=G, n_samples=n_samples, method=method, return_path=return_path), linear_model.lars_path( X, y, Gram=G, method=method,", "comparing # against the R result (read the note above)", "style # under these conditions. 
rng = np.random.RandomState(42) # Generate", "try: sys.stdout = StringIO() _, _, coef_path_ = linear_model.lars_path( X,", "if LooseVersion(np.__version__) >= '1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y,", "Gram=G, method='lar') for i, coef_ in enumerate(coef_path_.T): res = y", "estimator.coef_.min() < 0 estimator = getattr(linear_model, estname)(positive=True, **params) estimator.fit(X, y)", "def test_lasso_lars_vs_R_implementation(): # Test that sklearn LassoLars implementation agrees with", "np.zeros_like(coef_path_)) def test_no_path(): # Test that the ``return_path=False`` option returns", "sklearn when fit_intercept=False and # normalize=False ########################################################################### # # The", "# numpy deprecation def test_lars_lstsq(): # Test that Lars gives", "10 X = rng.rand(n_samples, 5) y = np.zeros(n_samples) _, _,", "0 # now we gonna test the positive option for", "Gram=G) alpha_, _, coef = linear_model.lars_path( X, y, method='lar', Gram=G,", "verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path should be", "# - alpha_bic > alpha_aic # - n_nonzero_bic < n_nonzero_aic", "\\ linear_model.lars_path(X, y, return_path=True, method=method, positive=True) assert coefs.min() >= 0", "from sklearn.utils.testing import TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn", "code: # # library(lars) # model_lasso_lars2 = lars(X, t(y), type=\"lasso\",", "not hasattr(lars_cv, 'n_nonzero_coefs') @pytest.mark.filterwarnings('ignore::FutureWarning') def test_lars_cv_max_iter(): with warnings.catch_warnings(record=True) as w:", "= t(model_lasso_lars$beta) # r = np.array([[0, 0, 0, 0, 0,", "change') # numpy deprecation def test_lasso_gives_lstsq_solution(): # Test that Lars", "= linear_model.Lasso(alpha=alpha, tol=1e-8, normalize=False).fit(X, y) err = linalg.norm(clf1.coef_ - clf2.coef_)", "'lar' (default) and lasso # Once deprecation of LAR +", "as np import pytest from 
scipy import linalg from sklearn.model_selection", "dataset # ensure that we get negative coefficients when positive=False", "estimator classes just make use of this function # we", "for i, coef_ in enumerate(coef_path_.T): res = y - np.dot(X,", "- np.dot(X, coef_cd_)) ** 2 + .1 * linalg.norm(coef_cd_, 1))", "5)) X_copy = X.copy() y = X[:, 2] lasso_lars.fit(X, y,", "= linear_model.LassoLarsCV() for length in (400, 200, 100): X =", "= linear_model.lars_path(X, y, Gram=G, Xy=Xy, method=method) for expected, got in", "- clf2.coef_) assert_less(err, 1e-3) # normalized data X = diabetes.data", "results when using the positive option # This test is", "estimators: estimator.fit(X, Y) Y_pred = estimator.predict(X) alphas, active, coef, path", "= linalg.norm(c - lasso_cd.coef_) assert_less(error, 0.01) # similar test, with", "lasso2.fit(X, y) assert_array_almost_equal(lasso.alphas_[:3], lasso2.alphas_) # Also check that the sequence", "not blow up, and stays somewhat close to a solution", "option. 
However for the middle part, the comparison of coefficient", "# Assure that estimators receiving multidimensional y do the right", "test_no_path(): # Test that the ``return_path=False`` option returns the correct", "assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1] def test_no_path_precomputed(): #", "[0, 5], [10, 10]], [[10, 10, 0], [1e-32, 0, 0],", "lars_cv.fit(X, y) np.testing.assert_array_less(old_alpha, lars_cv.alpha_) old_alpha = lars_cv.alpha_ assert not hasattr(lars_cv,", "0, 0, 0, 0], [0.08239882, 0.85784863, 0, 0, 0], [0.30114139,", "the coefs to compute the objective function, # we need", "do the test on the diabetes dataset # ensure that", "the transmissibility for the positive option of all estimator #", "positive=True) for c, a in zip(lasso_path.T, alphas): if a ==", "path X1 = 3 * X # use un-normalized dataset", "== X.shape[1] def _assert_same_lars_path_result(output1, output2): assert_equal(len(output1), len(output2)) for o1, o2", "lars.fit(X, y).coef_ obj_lars = (1. / (2. * 3.) *", "- 1e-2, 20): clf1 = linear_model.LassoLars(alpha=alpha, normalize=False).fit(X, y) clf2 =", "Regression' # by Efron et al 2004. 
The coefficients are", "method='lar', Gram=G, return_path=False) assert_array_almost_equal(coef, coef_path_[:, -1]) assert alpha_ == alphas_[-1]", "compute the objective function, # we need to turn off", "the # same results when using the positive option #", "read-only mode # This is a non-regression test for: #", "# The path should be of length 6 + 1", "y) skl_betas = model_lasso_lars.coef_path_ assert_array_almost_equal(r, skl_betas, decimal=12) ########################################################################### ########################################################################### #", "# https://gist.github.com/michigraber/7e7d7c75eca694c7a6ff for alpha in np.linspace(6e-1, 1 - 1e-2, 20):", "input, the # fold data is in read-only mode #", "answer G = np.dot(X.T, X) Xy = np.dot(X.T, y) for", "normalization lars = linear_model.LassoLars(.1, normalize=False) coef_lars_ = lars.fit(X, y).coef_ obj_lars", "6.12277822, -19.42109366]) x = np.array([[0.47299829, 0, 0, 0, 0], [0.08239882,", "= lars(X, t(y), type=\"lasso\", intercept=FALSE, # trace=TRUE, normalize=FALSE) # r", "added if necessary # test for 6d2b4c # Hilbert matrix", "this is no longer the case with the # equality_tolerance", "Scenario 2: Let's compare R vs sklearn when fit_intercept=True and", "y, method='lasso') coef_lstsq = np.linalg.lstsq(X, y)[0] assert_array_almost_equal(coef_lstsq, coef_path_[:, -1]) def", "verbose=0, max_iter=500) assert_array_almost_equal(coef_path_, np.zeros_like(coef_path_)) def test_no_path(): # Test that the", "def test_estimatorclasses_positive_constraint(): # testing the transmissibility for the positive option", "0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that LassoLars and Lasso using", "np.dot(X.T, y) assert_raises(ValueError, linear_model.lars_path, None, y, Gram=None, Xy=Xy) def test_all_precomputed():", "to True, R returns the coefficients in # their original", "0, 0, 0, 0], [0, 0, 0, 8.371887668009453, 19.463768371044026], [0,", "assert alpha_ == 
alphas_[-1] def test_no_path_precomputed(): # Test that the", "# as coordinate descent Lasso y = [5, 0, 5]", "TempMemmap from sklearn.exceptions import ConvergenceWarning from sklearn import linear_model, datasets", "copy=False _lars_path_residues(X_train, y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of", "x.T ########################################################################### # Scenario 1: Let's compare R vs sklearn", "# test for 6d2b4c # Hilbert matrix n = 5", "length 6 + 1 in a Lars going down to", "from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_less from sklearn.utils.testing", "Gram=G, Xy=Xy, method=method) for expected, got in zip(output, output_pre): assert_array_almost_equal(expected,", "* 3.)) * linalg.norm(y - np.dot(X, coef_cd_)) ** 2 +", "to be the case that Lars had to use the", "circumstance that the Lars-Lasso algorithm does not converge to #", "estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_)", "Lasso gives least square solution at the end # of", "# equality_tolerance checks X = [[1e20, 1e20, 0], [-1e-32, 0,", "at least some features get added if necessary # test", "# Test when input is a singular matrix X1 =", "(X_train, X_test, y_train, y_test): # The following should not fail", "-0.0803561, 0.40427291]]) X = x.T ########################################################################### # Scenario 1: Let's", "sigma * rng.rand(*y.shape) y = y.squeeze() lars_alphas, _, lars_coef =", "= np.dot(X.T, X) Xy = np.dot(X.T, y) for method in", "m = 70, 100 k = 5 X = rng.randn(n,", "# 0.22 @pytest.mark.parametrize( 'classifier', [linear_model.Lars, linear_model.LarsCV, linear_model.LassoLarsIC]) def test_lars_precompute(classifier): #", "alphas_, _, coef_path_ = linear_model.lars_path( X, y, method='lar', Gram=G) alpha_,", "0, 0, 2.231558436628169, 2.723267514525966, 2.811549786389614, 
2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0,", "the classifiers for alpha in np.linspace(1e-2, 1 - 1e-2, 20):", "chosen. old_alpha = 0 lars_cv = linear_model.LassoLarsCV() for length in", "(estimator.alphas_, estimator.active_, estimator.coef_, estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:,", "6d2b4c # Hilbert matrix n = 5 H = 1.", "error = linalg.norm(lasso_path[:, -1] - lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_path_length():", "method=method, positive=True) assert coefs.min() >= 0 # now we gonna", "is restricted # as compared with the above test without", "comparison of coefficient values # for a range of alphas,", "+ .1 * linalg.norm(coef_cd_, 1)) assert_less(obj_lars, obj_cd * (1. +", "test_lasso_lars_ic(): # Test the LassoLarsIC object by checking that #", "= rng.randn(n, m) w = np.zeros((m, 1)) i = np.arange(0,", "np.sign(rng.randn(k, 1)) * (rng.rand(k, 1) + 1) y = np.dot(X,", "8.371887668009453, 19.463768371044026], [0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507,", "X, y) def test_lars_path_readonly_data(): # When using automated memory mapping", "coord_descent.fit(X, y).coef_ cd_obj = objective_function(cd_coef_) assert_less(lars_obj, cd_obj * (1. 
+", "= linear_model.Lasso(alpha=alpha, tol=1e-4, normalize=False) cd_coef_ = coord_descent.fit(X, y).coef_ cd_obj =", "nonzero_aic = np.where(lars_aic.coef_)[0] assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) #", "# this is the main test for the positive parameter", "= linear_model.lars_path(X, y, method=method) output_pre = linear_model.lars_path(X, y, Gram=G, Xy=Xy,", "part, the comparison of coefficient values # for a range", "lasso_cd = linear_model.Lasso(fit_intercept=False, tol=1e-8, positive=True) for c, a in zip(lasso_path.T,", "that estimators receiving multidimensional y do the right thing Y", "= linear_model.Lasso(fit_intercept=False, normalize=True, tol=1e-8) for c, a in zip(lasso_path.T, alphas):", "[0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]])", "# Check that lars_path is robust to collinearity in input", "LARS Lasso is handling rank # deficient input data (with", "for 6d2b4c # Hilbert matrix n = 5 H =", "value change when numpy >= 1.14 rcond = None if", "make sure it's bounded n_samples = 10 X = rng.rand(n_samples,", "- lasso_cd.coef_) assert_less(error, 0.01) def test_lasso_lars_vs_R_implementation(): # Test that sklearn", "= 70, 100 k = 5 X = rng.randn(n, m)", "y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y) err", "before comparing # against the R result (read the note", "+ 1 else: # no more than max_pred variables can", "eps < abs(cov)]) if i < X.shape[1]: assert ocur ==", "this is the main test for the positive parameter on", "coefs to compute the objective function, # we need to", "R vs sklearn when fit_intercept=True and # normalize=True # #", "Test that the path length of the LassoLars is right", "-0.07501577, 0.80895216, 0, 0], [-0.01460346, -0.1015233, 0.0407278, 
0.80338378, 0], [-0.69363927,", "alpha=0 lasso_cd.alpha = a lasso_cd.fit(X, y) error = linalg.norm(c -", "test_singular_matrix(): # Test when input is a singular matrix X1", "[[0, 0], [1, 0]]) def test_rank_deficient_design(): # consistency test that", "9.245133544334507, 17.389369207545062, 26.971656815643499], [0, 0, -1.569380717440311, -5.924804108067312, -7.996385265061972]]) model_lasso_lars2 =", "non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X, y, random_state=42)", "type=\"lasso\", intercept=TRUE, # trace=TRUE, normalize=TRUE) # r2 = t(model_lasso_lars2$beta) r2", "descent give the same answers # Note it used to", "positive=True) assert coefs.min() >= 0 # now we gonna test", "is in read-only mode # This is a non-regression test", "'1.14' else -1 coef_lstsq = np.linalg.lstsq(X1, y, rcond=rcond)[0] assert_array_almost_equal(clf.coef_, coef_lstsq)", "positive=True).fit(X, y) clf2 = linear_model.Lasso(fit_intercept=False, alpha=alpha, tol=1e-8, normalize=False, positive=True).fit(X, y)", "== alphas_[-1] def test_no_path_precomputed(): # Test that the ``return_path=False`` option", "** 2 + alpha * linalg.norm(coef, 1)) lars = linear_model.LassoLars(alpha=alpha,", "is, they are rescaled back, whereas sklearn # does not", "that # it does not blow up, and stays somewhat", "change when numpy >= 1.14 rcond = None if LooseVersion(np.__version__)", "the number of samples increases. 
# This property is not", "is a non-regression test for: # https://github.com/scikit-learn/scikit-learn/issues/4597 splitted_data = train_test_split(X,", "[0, 0, 0, 0, 9.901611055290553], [0, 7.495923132833733, 9.245133544334507, 17.389369207545062, 26.971656815643499],", "all positive when positive=True # for method 'lar' (default) and", "= linear_model.Lars(n_nonzero_coefs=6, verbose=verbose) lars.fit(X, y) assert_equal(len(lars.coef_.nonzero()[0]), 6) # The path", "0.1}, 'LassoLarsCV': {}, 'LassoLarsIC': {}} @pytest.mark.filterwarnings('ignore: The default value of", "we get negative coefficients when positive=False # and all positive", "None, y, Gram=None, Xy=Xy) def test_all_precomputed(): # Test that lars_path", "features lars_cv = linear_model.LassoLarsCV(max_iter=5) lars_cv.fit(X, y) assert len(w) == 0", "diabetes.target G = np.dot(X.T, X) Xy = np.dot(X.T, y) alphas_,", "y_train, X_test, y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`')", "positive option of all estimator # classes in this same", "Test that user input regarding copy_X is not being overridden", "= 3 * X # use un-normalized dataset clf =", "0.], [1., 1., 0]]) y = np.array([1., 0., 0]) rng", "y_test, copy=False) @pytest.mark.filterwarnings('ignore: The default of the `iid`') # 0.22", "very ill-conditioned design, and check that # it does not", "Y_pred = estimator.predict(X) alphas, active, coef, path = (estimator.alphas_, estimator.active_,", "fit_intercept=False and # normalize=False ########################################################################### # # The R result", "2.723267514525966, 2.811549786389614, 2.813766976061531, 2.817462468949557, 2.817368178703816, 2.816221090636795], [0, 0, -1.218422599914637, -3.457726183014808,", "are selected. 
# - alpha_bic > alpha_aic # - n_nonzero_bic", "from distutils.version import LooseVersion import numpy as np import pytest", "* diabetes.data alphas, _, lasso_path = linear_model.lars_path(X, y, method='lasso') lasso_cd", "estimator.coef_path_) for k in range(n_targets): estimator.fit(X, Y[:, k]) y_pred =", "= [[1e20, 1e20, 0], [-1e-32, 0, 0], [1, 1, 1]]", "- clf2.coef_) assert_less(err, 1e-3) # same test, with normalized data", "right answer G = np.dot(X.T, X) Xy = np.dot(X.T, y)", "sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from sklearn.exceptions import", "data is in read-only mode # This is a non-regression", "assert_greater(lars_bic.alpha_, lars_aic.alpha_) assert_less(len(nonzero_bic), len(nonzero_aic)) assert_less(np.max(nonzero_bic), diabetes.data.shape[1]) # test error on", "= np.max(abs(cov)) eps = 1e-3 ocur = len(cov[C - eps", "that LARS Lasso is handling rank # deficient input data", "y) assert min(estimator.coef_) >= 0 def test_lasso_lars_vs_lasso_cd_positive(): # Test that", "got) @pytest.mark.filterwarnings('ignore: `rcond` parameter will change') # numpy deprecation def", "the Lars-Lasso algorithm and start to # diverge thereafter. See", "LARS has to go # far in the path to", "ignore_warnings from sklearn.utils.testing import assert_warns from sklearn.utils.testing import TempMemmap from" ]
[ "server_tokens off; # server_names_hash_bucket_size 64; # server_name_in_redirect off; include /etc/nginx/mime.types;", "and brackets_level == 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if", "copy import re from blocks import Block, EmptyBlock, KeyValueOption, Comment,", "new_block if block['block']: parse(block['block'], new_block) config = config[re_block.end():] pos, param_start", "param_start = 0, 0 continue if config[pos] == '{': brackets_level", "off; # server_names_hash_bucket_size 64; # server_name_in_redirect off; include /etc/nginx/mime.types; default_type", "re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], ) block = re_block.groupdict() if", "/var/log/nginx/error.log; ## # Gzip Settings ## gzip on; gzip_disable \"msie6\";", "+= 1 elif config[pos] == '}': brackets_level -= 1 if", "pos, brackets_level, param_start = 0, 0, 0 while pos <", "pos < len(config): if config[pos] == '#' and brackets_level ==", "qwe) print(qwe.render()) qwe = EmptyBlock() parse(\"\"\" servername wqeqweqwe; http {", "# Gzip Settings ## gzip on; gzip_disable \"msie6\"; }#123123 \"\"\",", "if config[pos] == '{': brackets_level += 1 elif config[pos] ==", "not None: re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], )", "on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; #", "config[param_start:], re.S) if not re_option: raise Exception('Wrong option') option =", "#coding: utf8 import copy import re from blocks import Block,", "option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options']))", "config = config[re_option.end():] pos, param_start = 0, 0 continue if", "config[pos] == '{': brackets_level += 1 elif config[pos] == '}':", "and param_start is not None: 
re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos", "!= 0: raise Exception('Not closed bracket') qwe = EmptyBlock() parse(\"\"\"#{", "import re from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location", "raise Exception('Not closed bracket') qwe = EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{}", "# comment {lalalal} #1 server { listen 8080 tls; root", "location /qwe{ s 500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe =", "}#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe = EmptyBlock() parse(\"\"\" servername wqeqweqwe;", "default_type application/octet-stream; ## # Logging Settings ## access_log /var/log/nginx/access.log; error_log", "= re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start =", "config = config[re_sharp_comment.end():] pos, param_start = 0, 0 continue if", "', option['param_options'])) config = config[re_option.end():] pos, param_start = 0, 0", "config[param_start:pos + 1], ) block = re_block.groupdict() if block['param_name'].lower() ==", "if brackets_level != 0: raise Exception('Not closed bracket') qwe =", "= EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal}", "{lalalal} #1 server { listen 8080 tls; root /data/up1; location", "1 if brackets_level != 0: raise Exception('Not closed bracket') qwe", "access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Gzip Settings ## gzip", "Block, EmptyBlock, KeyValueOption, Comment, Location def parse(s, parent_block): config =", "re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start = 0,", "error_log /var/log/nginx/error.log; ## # Gzip Settings ## gzip on; gzip_disable", "## access_log /var/log/nginx/access.log; 
error_log /var/log/nginx/error.log; ## # Gzip Settings ##", "brackets_level == 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not", "config = config[re_block.end():] pos, param_start = 0, 0 continue pos", "brackets_level += 1 elif config[pos] == '}': brackets_level -= 1", "pos, param_start = 0, 0 continue if config[pos] == '{':", "= 0, 0 continue pos += 1 if brackets_level !=", "0 while pos < len(config): if config[pos] == '#' and", "config[pos] == '#' and brackets_level == 0: re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$',", "Location def parse(s, parent_block): config = copy.copy(s) pos, brackets_level, param_start", "re_block.groupdict() if block['param_name'].lower() == 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else:", "if not re_option: raise Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']]", "new_block) config = config[re_block.end():] pos, param_start = 0, 0 continue", "servername qweqweqweqweqwe; # comment {lalalal} #1 server { listen 8080", "Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']] = new_block if", "';' and brackets_level == 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S)", "param_start = 0, 0 continue pos += 1 if brackets_level", "tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; # server_tokens off; #", "EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal} #1", "parse(block['block'], new_block) config = config[re_block.end():] pos, param_start = 0, 0", "0 continue pos += 1 if brackets_level != 0: raise", "servername wqeqweqwe; http { ## # Basic Settings ## sendfile", "wqeqweqwe; http { ## # Basic Settings ## sendfile on;", "and brackets_level == 0: re_sharp_comment = 
re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment", "http { ## # Basic Settings ## sendfile on; tcp_nopush", "utf8 import copy import re from blocks import Block, EmptyBlock,", "new_block = Block() parent_block[block['param_name']] = new_block if block['block']: parse(block['block'], new_block)", "print(qwe.render()) qwe = EmptyBlock() parse(\"\"\" servername wqeqweqwe; http { ##", "server { listen 8080 tls; root /data/up1; location / {", "# server_names_hash_bucket_size 64; # server_name_in_redirect off; include /etc/nginx/mime.types; default_type application/octet-stream;", "on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; # server_tokens off;", "= re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config", "# server_tokens off; # server_names_hash_bucket_size 64; # server_name_in_redirect off; include", "0, 0 continue pos += 1 if brackets_level != 0:", "= config[re_block.end():] pos, param_start = 0, 0 continue pos +=", "'{': brackets_level += 1 elif config[pos] == '}': brackets_level -=", "qwe = EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe; # comment", "= KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options'])) config = config[re_option.end():] pos,", "if block['block']: parse(block['block'], new_block) config = config[re_block.end():] pos, param_start =", "continue if config[pos] == '{': brackets_level += 1 elif config[pos]", "asd #qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal} #1 server {", "/etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ## access_log /var/log/nginx/access.log;", "= config[re_option.end():] pos, param_start = 0, 0 continue if config[pos]", "{ ## # Basic Settings ## sendfile on; tcp_nopush on;", "## # Gzip Settings ## gzip on; gzip_disable \"msie6\"; }#123123", "# Basic 
Settings ## sendfile on; tcp_nopush on; tcp_nodelay on;", "on; keepalive_timeout 65; types_hash_max_size 2048; # server_tokens off; # server_names_hash_bucket_size", "config[pos] == ';' and brackets_level == 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);',", "+= 1 if brackets_level != 0: raise Exception('Not closed bracket')", "while pos < len(config): if config[pos] == '#' and brackets_level", "EmptyBlock() parse(\"\"\" servername wqeqweqwe; http { ## # Basic Settings", "== 0: re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict()", "== 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option:", "option') option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', ' ',", "== '{': brackets_level += 1 elif config[pos] == '}': brackets_level", "= EmptyBlock() parse(\"\"\" servername wqeqweqwe; http { ## # Basic", "s 500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe = EmptyBlock() parse(\"\"\"", "Comment, Location def parse(s, parent_block): config = copy.copy(s) pos, brackets_level,", "<filename>parser.py #coding: utf8 import copy import re from blocks import", "pos, param_start = 0, 0 continue if config[pos] == ';'", "-= 1 if brackets_level == 0 and param_start is not", "sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048;", "sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start = 0, 0 continue", "Block() parent_block[block['param_name']] = new_block if block['block']: parse(block['block'], new_block) config =", "parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options'])) config = config[re_option.end():]", "2048; # server_tokens off; # server_names_hash_bucket_size 64; # server_name_in_redirect off;", "server_name_in_redirect off; include 
/etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings", "1 if brackets_level == 0 and param_start is not None:", "None: re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], ) block", "0: raise Exception('Not closed bracket') qwe = EmptyBlock() parse(\"\"\"#{ asd", "Settings ## sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65;", "'(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], ) block = re_block.groupdict() if block['param_name'].lower()", "Exception('Not closed bracket') qwe = EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername", "# server_name_in_redirect off; include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging", "types_hash_max_size 2048; # server_tokens off; # server_names_hash_bucket_size 64; # server_name_in_redirect", "comment {lalalal} #1 server { listen 8080 tls; root /data/up1;", "## # Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ##", "qweqweqweqweqwe; # comment {lalalal} #1 server { listen 8080 tls;", "brackets_level == 0: re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment =", "parse(s, parent_block): config = copy.copy(s) pos, brackets_level, param_start = 0,", "closed bracket') qwe = EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe;", "Gzip Settings ## gzip on; gzip_disable \"msie6\"; }#123123 \"\"\", qwe)", "parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start = 0, 0", "blocks import Block, EmptyBlock, KeyValueOption, Comment, Location def parse(s, parent_block):", "len(config): if config[pos] == '#' and brackets_level == 0: re_sharp_comment", "tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size 2048; # 
server_tokens", "config[pos] == '}': brackets_level -= 1 if brackets_level == 0", "== 0 and param_start is not None: re_block = re.search(", "elif config[pos] == '}': brackets_level -= 1 if brackets_level ==", "0 and param_start is not None: re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}',", "{ listen 8080 tls; root /data/up1; location / { l200;", "import Block, EmptyBlock, KeyValueOption, Comment, Location def parse(s, parent_block): config", "config = copy.copy(s) pos, brackets_level, param_start = 0, 0, 0", "sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos, param_start", "Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', '", "/qwe{ s 500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe = EmptyBlock()", "## sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout 65; types_hash_max_size", "KeyValueOption, Comment, Location def parse(s, parent_block): config = copy.copy(s) pos,", "re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option: raise Exception('Wrong", "= config[re_sharp_comment.end():] pos, param_start = 0, 0 continue if config[pos]", "re.S) if not re_option: raise Exception('Wrong option') option = re_option.groupdict()", "= re_block.groupdict() if block['param_name'].lower() == 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block)", "parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']] = new_block if block['block']:", "brackets_level -= 1 if brackets_level == 0 and param_start is", "= Block() parent_block[block['param_name']] = new_block if block['block']: parse(block['block'], new_block) config", "## # Basic Settings ## sendfile on; 
tcp_nopush on; tcp_nodelay", "64; # server_name_in_redirect off; include /etc/nginx/mime.types; default_type application/octet-stream; ## #", "def parse(s, parent_block): config = copy.copy(s) pos, brackets_level, param_start =", "include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ## access_log", "parent_block[block['param_name']] = new_block if block['block']: parse(block['block'], new_block) config = config[re_block.end():]", "config[re_option.end():] pos, param_start = 0, 0 continue if config[pos] ==", "listen 8080 tls; root /data/up1; location / { l200; }", "brackets_level == 0 and param_start is not None: re_block =", "# Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## #", "keepalive_timeout 65; types_hash_max_size 2048; # server_tokens off; # server_names_hash_bucket_size 64;", "= 0, 0 continue if config[pos] == '{': brackets_level +=", "pos += 1 if brackets_level != 0: raise Exception('Not closed", "application/octet-stream; ## # Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log;", "raise Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+',", "param_start = 0, 0 continue if config[pos] == ';' and", "Settings ## gzip on; gzip_disable \"msie6\"; }#123123 \"\"\", qwe) print(qwe.render())", "if brackets_level == 0 and param_start is not None: re_block", "config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():]", "KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options'])) config = config[re_option.end():] pos, param_start", "0, 0, 0 while pos < len(config): if config[pos] ==", "0, 0 while pos < len(config): if config[pos] == '#'", "= copy.copy(s) pos, brackets_level, param_start = 0, 0, 0 while", "0: re_sharp_comment = 
re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'],", "re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], ) block =", "= Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']] = new_block", "= re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option: raise Exception('Wrong option')", "1 elif config[pos] == '}': brackets_level -= 1 if brackets_level", "65; types_hash_max_size 2048; # server_tokens off; # server_names_hash_bucket_size 64; #", "else: new_block = Block() parent_block[block['param_name']] = new_block if block['block']: parse(block['block'],", "}#qweqwe\"\"\", qwe) print(qwe.render()) qwe = EmptyBlock() parse(\"\"\" servername wqeqweqwe; http", "param_start = 0, 0, 0 while pos < len(config): if", "= new_block if block['block']: parse(block['block'], new_block) config = config[re_block.end():] pos,", "is not None: re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1],", "config[re_block.end():] pos, param_start = 0, 0 continue pos += 1", "bracket') qwe = EmptyBlock() parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe; #", "parse(\"\"\" servername wqeqweqwe; http { ## # Basic Settings ##", "pos, param_start = 0, 0 continue pos += 1 if", "Logging Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Gzip", "re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config = config[re_sharp_comment.end():] pos,", "\\n]+', ' ', option['param_options'])) config = config[re_option.end():] pos, param_start =", "not re_option: raise 
Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']] =", "== '}': brackets_level -= 1 if brackets_level == 0 and", "parent_block): config = copy.copy(s) pos, brackets_level, param_start = 0, 0,", "brackets_level != 0: raise Exception('Not closed bracket') qwe = EmptyBlock()", "< len(config): if config[pos] == '#' and brackets_level == 0:", "block['block']: parse(block['block'], new_block) config = config[re_block.end():] pos, param_start = 0,", "= re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos + 1], ) block = re_block.groupdict()", "l200; } location /qwe{ s 500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render())", "{ l200; } location /qwe{ s 500; }#123 }#qweqwe\"\"\", qwe)", "/var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Gzip Settings ## gzip on;", "== ';' and brackets_level == 0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:],", "8080 tls; root /data/up1; location / { l200; } location", "re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment'])) config =", "location / { l200; } location /qwe{ s 500; }#123", "0 continue if config[pos] == '{': brackets_level += 1 elif", "} location /qwe{ s 500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe", "server_names_hash_bucket_size 64; # server_name_in_redirect off; include /etc/nginx/mime.types; default_type application/octet-stream; ##", "0: re_option = re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option: raise", "re from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location def", "block = re_block.groupdict() if block['param_name'].lower() == 'location': new_block = Location(block['param_options'])", "if config[pos] == '#' and 
brackets_level == 0: re_sharp_comment =", "root /data/up1; location / { l200; } location /qwe{ s", "new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']] =", ") block = re_block.groupdict() if block['param_name'].lower() == 'location': new_block =", "/ { l200; } location /qwe{ s 500; }#123 }#qweqwe\"\"\",", "== 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block()", "continue if config[pos] == ';' and brackets_level == 0: re_option", "'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block = Block() parent_block[block['param_name']]", "continue pos += 1 if brackets_level != 0: raise Exception('Not", "+ 1], ) block = re_block.groupdict() if block['param_name'].lower() == 'location':", "parse(\"\"\"#{ asd #qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal} #1 server", "#qweqeqwe{} servername qweqweqweqweqwe; # comment {lalalal} #1 server { listen", "tls; root /data/up1; location / { l200; } location /qwe{", "re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M) sharp_comment = re_sharp_comment.groupdict() parent_block.add_comment(Comment(sharp_comment['offset'], sharp_comment['comment']))", "500; }#123 }#qweqwe\"\"\", qwe) print(qwe.render()) qwe = EmptyBlock() parse(\"\"\" servername", "re.search('\\s*(?P<param_name>\\w+)\\s*(?P<param_options>.*?);', config[param_start:], re.S) if not re_option: raise Exception('Wrong option') option", "off; include /etc/nginx/mime.types; default_type application/octet-stream; ## # Logging Settings ##", "0, 0 continue if config[pos] == '{': brackets_level += 1", "== '#' and brackets_level == 0: re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config,", "'#' and brackets_level == 0: re_sharp_comment = re.search('(?P<offset>[\\s\\n]*)#(?P<comment>.*)$', config, re.M)", 
"from blocks import Block, EmptyBlock, KeyValueOption, Comment, Location def parse(s,", "import copy import re from blocks import Block, EmptyBlock, KeyValueOption,", "brackets_level, param_start = 0, 0, 0 while pos < len(config):", "= 0, 0, 0 while pos < len(config): if config[pos]", "' ', option['param_options'])) config = config[re_option.end():] pos, param_start = 0,", "if config[pos] == ';' and brackets_level == 0: re_option =", "#1 server { listen 8080 tls; root /data/up1; location /", "config[re_sharp_comment.end():] pos, param_start = 0, 0 continue if config[pos] ==", "copy.copy(s) pos, brackets_level, param_start = 0, 0, 0 while pos", "Settings ## access_log /var/log/nginx/access.log; error_log /var/log/nginx/error.log; ## # Gzip Settings", "Basic Settings ## sendfile on; tcp_nopush on; tcp_nodelay on; keepalive_timeout", "qwe = EmptyBlock() parse(\"\"\" servername wqeqweqwe; http { ## #", "block['param_name'].lower() == 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block =", "= 0, 0 continue if config[pos] == ';' and brackets_level", "= re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options'])) config", "param_start is not None: re_block = re.search( '(?P<param_name>\\w+)\\s*(?P<param_options>.*)\\s*{(\\n){0,1}(?P<block>(.|\\n)*)}', config[param_start:pos +", "1], ) block = re_block.groupdict() if block['param_name'].lower() == 'location': new_block", "0, 0 continue if config[pos] == ';' and brackets_level ==", "0 continue if config[pos] == ';' and brackets_level == 0:", "/data/up1; location / { l200; } location /qwe{ s 500;", "re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[ \\n]+', ' ', option['param_options'])) config =", "option['param_options'])) config = config[re_option.end():] pos, param_start = 0, 0 continue", "'}': brackets_level -= 1 if brackets_level == 0 and param_start", "if 
block['param_name'].lower() == 'location': new_block = Location(block['param_options']) parent_block.add_location(new_block) else: new_block", "re_option: raise Exception('Wrong option') option = re_option.groupdict() parent_block[option['param_name']] = KeyValueOption(re.sub('[", "EmptyBlock, KeyValueOption, Comment, Location def parse(s, parent_block): config = copy.copy(s)" ]
[ "ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git clean -xdf", "= subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git clean -xdf -f')", "Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False):", "Host:win7 x86' print ' Branch:develop' print ' Target:win32' print '", "print ' Host:win7 x86' print ' Branch:develop' print ' Target:win32'", "os.system(\"xcopy \" + source_dir + \" . /E /Y /H\")", "\"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name = os.environ['NODE_NAME'] source_dir", "' Host:win7 x86' print ' Branch:develop' print ' Target:win32' print", "x86' print ' Branch:develop' print ' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\"", "import sys print 'Build Config:' print ' Host:win7 x86' print", "\"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name = os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/'", "source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy", "os.system('git pull origin develop') os.system('git submodule update --init --force') ret", "subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git clean -xdf -f') print", "-f') print 'build exit' print ret if ret == 0:", "'build exit' print ret if ret == 0: exit(0) else:", "'../cocos-2dx-develop-base-repo/node/' + node_name source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" +", "os.system('git submodule update --init --force') ret = 
subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build", "os.system('git clean -xdf -f') print 'build exit' print ret if", "/Y /H\") os.system('git pull origin develop') os.system('git submodule update --init", "sys print 'Build Config:' print ' Host:win7 x86' print '", "Config:' print ' Host:win7 x86' print ' Branch:develop' print '", "'Build Config:' print ' Host:win7 x86' print ' Branch:develop' print", "+ node_name source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" + source_dir", "print ' Branch:develop' print ' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\"", "source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" + source_dir + \" . /E", "print 'build exit' print ret if ret == 0: exit(0)", "\" . /E /Y /H\") os.system('git pull origin develop') os.system('git", "import os import subprocess import sys print 'Build Config:' print", "if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name = os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' +", "\" + source_dir + \" . 
/E /Y /H\") os.system('git", "/Build \"Debug|Win32\"', shell=True) os.system('git clean -xdf -f') print 'build exit'", "' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') ==", "Branch:develop' print ' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"'", "node_name source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" + source_dir +", "exit' print ret if ret == 0: exit(0) else: exit(1)", "= source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" + source_dir + \" .", "develop') os.system('git submodule update --init --force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\"", "source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy \" + source_dir + \"", "print ' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln')", "' Branch:develop' print ' Target:win32' print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build", "\"Debug|Win32\"', shell=True) os.system('git clean -xdf -f') print 'build exit' print", "print 'Build Config:' print ' Host:win7 x86' print ' Branch:develop'", "source_dir + \" . 
/E /Y /H\") os.system('git pull origin", "submodule update --init --force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"',", "= os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name source_dir = source_dir.replace(\"/\",", "node_name = os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name source_dir =", "/Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name = os.environ['NODE_NAME'] source_dir =", "-xdf -f') print 'build exit' print ret if ret ==", "--init --force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git", "' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name =", "subprocess import sys print 'Build Config:' print ' Host:win7 x86'", "print ' \"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name", "/E /Y /H\") os.system('git pull origin develop') os.system('git submodule update", "--force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git clean", "update --init --force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True)", "import subprocess import sys print 'Build Config:' print ' Host:win7", "os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name source_dir = source_dir.replace(\"/\", os.sep)", "clean -xdf -f') print 'build exit' print ret if ret", ". 
/E /Y /H\") os.system('git pull origin develop') os.system('git submodule", "+ \" . /E /Y /H\") os.system('git pull origin develop')", "+ source_dir + \" . /E /Y /H\") os.system('git pull", "\"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"', shell=True) os.system('git clean -xdf -f') print 'build", "os.sep) os.system(\"xcopy \" + source_dir + \" . /E /Y", "= '../cocos-2dx-develop-base-repo/node/' + node_name source_dir = source_dir.replace(\"/\", os.sep) os.system(\"xcopy \"", "os import subprocess import sys print 'Build Config:' print '", "== False): node_name = os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name", "pull origin develop') os.system('git submodule update --init --force') ret =", "\"%VS110COMNTOOLS%..\\IDE\\devenv.com\" \"build\\cocos2d-win32.vc2012.sln\" /Build \"Debug|Win32\"' if(os.path.exists('build/cocos2d-win32.vc2012.sln') == False): node_name = os.environ['NODE_NAME']", "False): node_name = os.environ['NODE_NAME'] source_dir = '../cocos-2dx-develop-base-repo/node/' + node_name source_dir", "shell=True) os.system('git clean -xdf -f') print 'build exit' print ret", "/H\") os.system('git pull origin develop') os.system('git submodule update --init --force')", "origin develop') os.system('git submodule update --init --force') ret = subprocess.call('\"%VS110COMNTOOLS%..\\IDE\\devenv.com\"" ]
[ "import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\ RateCenterSearchOrderMap class RateCenterSearchOrder(RateCenterSearchOrderMap, BaseData):", "python from iris_sdk.models.base_resource import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\ RateCenterSearchOrderMap", "BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\ RateCenterSearchOrderMap class RateCenterSearchOrder(RateCenterSearchOrderMap, BaseData): pass", "<filename>iris_sdk/models/data/ord/rate_center_search_order.py #!/usr/bin/env python from iris_sdk.models.base_resource import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import", "#!/usr/bin/env python from iris_sdk.models.base_resource import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\", "iris_sdk.models.base_resource import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\ RateCenterSearchOrderMap class RateCenterSearchOrder(RateCenterSearchOrderMap,", "from iris_sdk.models.base_resource import BaseData from iris_sdk.models.maps.ord.rate_center_search_order import \\ RateCenterSearchOrderMap class" ]
[ "bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "number of iterations (Iterations) export_flags : set The set of", "len(objectivefunc)): convergence = [0] * NumOfRuns executionTime = [0] *", "general parameters for all optimizers (population size, number of iterations)", "False Flag_details = False # CSV Header for for the", "= numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details =", "ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l + 1)) for i in", "ub, dim, popSize, Iter) elif algo == \"MFO\": x =", "elif algo == \"FFA\": x = ffa.FFA(getattr(benchmarks, function_name), lb, ub,", "the cinvergence CnvgHeader = [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\"", "flags which are: 1. Export (Exporting the results in a", "lb, ub, dim, popSize, Iter) elif algo == \"GWO\": x", "CSV file header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] )", "= x.convergence optimizerName = x.optimizer objfname = x.objfname if Export_details", "plots) Returns ----------- N/A \"\"\" # Select general parameters for", "# Select general parameters for all optimizers (population size, number", "i in range(0, len(optimizer)): for j in range(0, len(objectivefunc)): convergence", "popSize, Iter) elif algo == \"PSO\": x = pso.PSO(getattr(benchmarks, function_name),", ": list The list of optimizers names objectivefunc : list", "decimals=2 ).tolist() a = numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence] )", "names objectivefunc : list The list of benchmark functions NumOfRuns", "algo == \"MFO\": x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim,", "= results_directory + \"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out:", "Iter) elif algo == \"BAT\": x = 
bat.BAT(getattr(benchmarks, function_name), lb,", "= mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "plots) 4. Export_boxplot (Exporting the box plots) Returns ----------- N/A", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"FFA\":", "algo == \"GWO\": x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim,", "= sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "NumOfRuns, params, export_flags): \"\"\" It serves as the main interface", "== \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "lb, ub, dim, popSize, Iter) elif algo == \"HHO\": x", "results in files) 3. Export_convergence (Exporting the covergence plots) 4.", "time import warnings import os import plot_convergence as conv_plot import", "out: writer = csv.writer(out, delimiter=\",\") if ( Flag_details == False", "lb, ub, dim, popSize, Iter) elif algo == \"GA\": x", "Flag_details = False # CSV Header for for the cinvergence", "# just one time to write the header of the", "one time to write the header of the CSV file", "objectivefunc, Iterations) if Flag == False: # Faild to run", "Iter) elif algo == \"GA\": x = ga.GA(getattr(benchmarks, function_name), lb,", "list of benchmark functions NumOfRuns : int The number of", "import optimizers.SCA as sca import optimizers.JAYA as jaya import optimizers.HYBRID", "export_flags : set The set of Boolean flags which are:", "== \"CS\": x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "== \"SCA\": x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "dim, popSize, Iter) elif algo == \"GWO\": x = gwo.GWO(getattr(benchmarks,", "return null return x def run(optimizer, objectivefunc, NumOfRuns, params, export_flags):", "convergence = [0] * NumOfRuns executionTime = [0] * NumOfRuns", "benchmark functions NumOfRuns : int The number of independent runs", "func_details[1] ub = func_details[2] dim = 
func_details[3] if algo ==", "ub, dim, popSize, Iter) elif algo == \"PSO\": x =", "Header for for the cinvergence CnvgHeader = [] results_directory =", "range(0, len(optimizer)): for j in range(0, len(objectivefunc)): convergence = [0]", "function is selected. Check lists of available optimizers and cost", "write the header of the CSV file header = numpy.concatenate(", "as ssa import optimizers.GA as ga import optimizers.HHO as hho", "jaya import optimizers.HYBRID as hybrid import benchmarks import csv import", "popSize, Iter) elif algo == \"SCA\": x = sca.SCA(getattr(benchmarks, function_name),", "least one experiment executionTime[k] = x.executionTime a = numpy.concatenate( [[x.optimizer,", "popSize, Iter) elif algo == \"JAYA\": x = jaya.JAYA(getattr(benchmarks, function_name),", "Iterations = params[\"Iterations\"] # Export results ? Export = export_flags[\"Export_avg\"]", "dtype=numpy.float64), decimals=2 ).tolist() a = numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence]", "[0] * NumOfRuns executionTime = [0] * NumOfRuns for k", "optimizers (population size, number of iterations) .... PopulationSize = params[\"PopulationSize\"]", "numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist() a = numpy.concatenate( [[optimizerName, objfname,", "\"GWO\": x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "\"PSO\": x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "algo == \"CS\": x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim,", "warnings import os import plot_convergence as conv_plot import plot_boxplot as", "numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close() if Export_convergence", "Export_details (Exporting the detailed results in files) 3. 
Export_convergence (Exporting", "x.convergence] ) writer.writerow(a) out.close() if Export == True: ExportToFile =", "Iter) elif algo == \"WOA\": x = woa.WOA(getattr(benchmarks, function_name), lb,", "func_details[3] if algo == \"SSA\": x = ssa.SSA(getattr(benchmarks, function_name), lb,", "avgConvergence] ) writer.writerow(a) out.close() if Export_convergence == True: conv_plot.run(results_directory, optimizer,", "func_details[0] lb = func_details[1] ub = func_details[2] dim = func_details[3]", "(Exporting the results in a file) 2. Export_details (Exporting the", "popSize, Iter) elif algo == \"HHO\": x = hho.HHO(getattr(benchmarks, function_name),", "True # at least one experiment executionTime[k] = x.executionTime a", "results in a file) 2. Export_details (Exporting the detailed results", "The number of independent runs params : set The set", "\"\"\" # Select general parameters for all optimizers (population size,", "Flag = True avgExecutionTime = float(\"%0.2f\" % (sum(executionTime) / NumOfRuns))", "cs import optimizers.BAT as bat import optimizers.WOA as woa import", "in range(0, len(optimizer)): for j in range(0, len(objectivefunc)): convergence =", "\"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details = True # at least", "population (PopulationSize) 2. The number of iterations (Iterations) export_flags :", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"MFO\":", "a = numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close()", "as conv_plot import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details,", "x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence] ) writer.writerow(a) out.close() if Export", "selected. 
Check lists of available optimizers and cost functions\" )", "= export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot =", "optimizers.BAT as bat import optimizers.WOA as woa import optimizers.FFA as", "elif algo == \"CS\": x = cs.CS(getattr(benchmarks, function_name), lb, ub,", "popSize, Iter) elif algo == \"MFO\": x = mfo.MFO(getattr(benchmarks, function_name),", "time to write the header of the CSV file header", "writer.writerow(header) Flag = True avgExecutionTime = float(\"%0.2f\" % (sum(executionTime) /", "if algo == \"SSA\": x = ssa.SSA(getattr(benchmarks, function_name), lb, ub,", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"BAT\":", "x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "main interface of the framework for running the experiments. Parameters", "CnvgHeader.append(\"Iter\" + str(l + 1)) for i in range(0, len(optimizer)):", "= mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "number of iterations) .... PopulationSize = params[\"PopulationSize\"] Iterations = params[\"Iterations\"]", "ExportToFile = results_directory + \"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as", ": set The set of Boolean flags which are: 1.", "as mvo import optimizers.GWO as gwo import optimizers.MFO as mfo", "files) 3. Export_convergence (Exporting the covergence plots) 4. Export_boxplot (Exporting", "results ? 
Export = export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence =", "): # just one time to write the header of", "[[x.optimizer, x.objfname, x.executionTime], x.convergence] ) writer.writerow(a) out.close() if Export ==", "import optimizers.HHO as hho import optimizers.SCA as sca import optimizers.JAYA", "[] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l", "run(optimizer, objectivefunc, NumOfRuns, params, export_flags): \"\"\" It serves as the", "Path import optimizers.PSO as pso import optimizers.MVO as mvo import", "% (sum(executionTime) / NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64),", "= hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) else: return", "x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "== \"JAYA\": x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "(Exporting the covergence plots) 4. Export_boxplot (Exporting the box plots)", "box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Flag == False: # Faild", "func_details, popSize, Iter): function_name = func_details[0] lb = func_details[1] ub", "dim, popSize, Iter) elif algo == \"GA\": x = ga.GA(getattr(benchmarks,", "if ( Flag_details == False ): # just one time", "return x def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): \"\"\" It", "all optimizers (population size, number of iterations) .... PopulationSize =", "numpy import time import warnings import os import plot_convergence as", "selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k] = x.convergence optimizerName = x.optimizer", "dim, popSize, Iter) elif algo == \"HHO\": x = hho.HHO(getattr(benchmarks,", "dim, popSize, Iter) elif algo == \"JAYA\": x = jaya.JAYA(getattr(benchmarks,", "iterations) .... 
PopulationSize = params[\"PopulationSize\"] Iterations = params[\"Iterations\"] # Export", "Iter) elif algo == \"PSO\": x = pso.PSO(getattr(benchmarks, function_name), lb,", "lb, ub, dim, popSize, Iter) elif algo == \"WOA\": x", "optimizer : list The list of optimizers names objectivefunc :", "import optimizers.CS as cs import optimizers.BAT as bat import optimizers.WOA", "gwo import optimizers.MFO as mfo import optimizers.CS as cs import", "box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize, Iter): function_name = func_details[0]", "(Exporting the box plots) Returns ----------- N/A \"\"\" # Select", "pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "PopulationSize = params[\"PopulationSize\"] Iterations = params[\"Iterations\"] # Export results ?", "writer.writerow(header) Flag_details = True # at least one experiment executionTime[k]", "[[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag = True avgExecutionTime", "1)) for i in range(0, len(optimizer)): for j in range(0,", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"GWO\":", "= results_directory + \"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out:", "= func_details[2] dim = func_details[3] if algo == \"SSA\": x", "selector(algo, func_details, popSize, Iter): function_name = func_details[0] lb = func_details[1]", "for k in range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x =", "= x.executionTime a = numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence] )", "null return x def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): \"\"\"", "\"MFO\": x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "ExportToFile = results_directory + \"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as", "conv_plot.run(results_directory, optimizer, 
objectivefunc, Iterations) if Export_boxplot == True: box_plot.run(results_directory, optimizer,", "as ffa import optimizers.SSA as ssa import optimizers.GA as ga", "ssa import optimizers.GA as ga import optimizers.HHO as hho import", "( Flag_details == False ): # just one time to", "(PopulationSize) 2. The number of iterations (Iterations) export_flags : set", "the CSV file header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader]", "out: writer = csv.writer(out, delimiter=\",\") if ( Flag == False", "list The list of optimizers names objectivefunc : list The", "export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag = False", "+ \"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer =", "lb, ub, dim, popSize, Iter) elif algo == \"MFO\": x", "= csv.writer(out, delimiter=\",\") if ( Flag == False ): #", "Optomizer or Cost function is selected. 
Check lists of available", "----------- N/A \"\"\" # Select general parameters for all optimizers", "= pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "import optimizers.PSO as pso import optimizers.MVO as mvo import optimizers.GWO", "optimizers.JAYA as jaya import optimizers.HYBRID as hybrid import benchmarks import", "as sca import optimizers.JAYA as jaya import optimizers.HYBRID as hybrid", "= woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "ub, dim, popSize, Iter) elif algo == \"CS\": x =", "Flag_details = True # at least one experiment executionTime[k] =", "export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"]", "as pso import optimizers.MVO as mvo import optimizers.GWO as gwo", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"HHO\":", "ub, dim, popSize, Iter) else: return null return x def", "size, number of iterations) .... PopulationSize = params[\"PopulationSize\"] Iterations =", "= gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "algo == \"SCA\": x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim,", "False # CSV Header for for the cinvergence CnvgHeader =", "lb, ub, dim, popSize, Iter) elif algo == \"MVO\": x", "x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "dim = func_details[3] if algo == \"SSA\": x = ssa.SSA(getattr(benchmarks,", "= x.objfname if Export_details == True: ExportToFile = results_directory +", "as cs import optimizers.BAT as bat import optimizers.WOA as woa", "file) 2. 
Export_details (Exporting the detailed results in files) 3.", "elif algo == \"BAT\": x = bat.BAT(getattr(benchmarks, function_name), lb, ub,", "Export = export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot", "Iter) elif algo == \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name), lb,", "framework for running the experiments. Parameters ---------- optimizer : list", "sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "== \"FFA\": x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "if Export_convergence == True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Export_boxplot", "as out: writer = csv.writer(out, delimiter=\",\") if ( Flag ==", "optimizers.SSA as ssa import optimizers.GA as ga import optimizers.HHO as", "csv.writer(out, delimiter=\",\") if ( Flag == False ): # just", "for for the cinvergence CnvgHeader = [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\")", "elif algo == \"JAYA\": x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub,", "x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "x.convergence optimizerName = x.optimizer objfname = x.objfname if Export_details ==", "\"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag = True avgExecutionTime = float(\"%0.2f\"", "Export_details = export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag", "lb, ub, dim, popSize, Iter) elif algo == \"HYBRID\": x", "csv.writer(out, delimiter=\",\") if ( Flag_details == False ): # just", "func_details, PopulationSize, Iterations) convergence[k] = x.convergence optimizerName = x.optimizer objfname", "avgExecutionTime = float(\"%0.2f\" % (sum(executionTime) / NumOfRuns)) avgConvergence = numpy.around(", "delimiter=\",\") if ( Flag_details 
== False ): # just one", "optimizerName = x.optimizer objfname = x.objfname if Export_details == True:", "lb, ub, dim, popSize, Iter) elif algo == \"BAT\": x", "the main interface of the framework for running the experiments.", "as hybrid import benchmarks import csv import numpy import time", "+ \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l in range(0, Iterations): CnvgHeader.append(\"Iter\"", ") writer.writerow(a) out.close() if Export_convergence == True: conv_plot.run(results_directory, optimizer, objectivefunc,", "PopulationSize, Iterations) convergence[k] = x.convergence optimizerName = x.optimizer objfname =", "benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k] = x.convergence", "popSize, Iter): function_name = func_details[0] lb = func_details[1] ub =", "optimizers.GA as ga import optimizers.HHO as hho import optimizers.SCA as", "# Faild to run at least one experiment print( \"No", "Faild to run at least one experiment print( \"No Optomizer", "import numpy import time import warnings import os import plot_convergence", "The list of optimizers names objectivefunc : list The list", "lb, ub, dim, popSize, Iter) else: return null return x", "x = selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k] = x.convergence optimizerName", "\"a\", newline=\"\\n\") as out: writer = csv.writer(out, delimiter=\",\") if (", "objectivefunc, NumOfRuns, params, export_flags): \"\"\" It serves as the main", "Iter) elif algo == \"FFA\": x = ffa.FFA(getattr(benchmarks, function_name), lb,", "export_flags): \"\"\" It serves as the main interface of the", "x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "Flag = False Flag_details = False # CSV Header for", "as bat import optimizers.WOA as woa import optimizers.FFA as ffa", "== False ): # just one time to write the", "\"JAYA\": x = jaya.JAYA(getattr(benchmarks, 
function_name), lb, ub, dim, popSize, Iter)", "True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Flag == False: #", "range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details, PopulationSize,", "algo == \"FFA\": x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim,", "algo == \"HHO\": x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim,", "header of the CSV file header = numpy.concatenate( [[\"Optimizer\", \"objfname\",", "Export results ? Export = export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence", "of population (PopulationSize) 2. The number of iterations (Iterations) export_flags", "experiment executionTime[k] = x.executionTime a = numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime],", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"JAYA\":", "True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Export_boxplot == True: box_plot.run(results_directory,", "Iter) elif algo == \"HHO\": x = hho.HHO(getattr(benchmarks, function_name), lb,", "least one experiment print( \"No Optomizer or Cost function is", "\"CS\": x = cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "the covergence plots) 4. Export_boxplot (Exporting the box plots) Returns", "independent runs params : set The set of parameters which", "of iterations) .... 
PopulationSize = params[\"PopulationSize\"] Iterations = params[\"Iterations\"] #", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"PSO\":", "benchmarks import csv import numpy import time import warnings import", "import optimizers.JAYA as jaya import optimizers.HYBRID as hybrid import benchmarks", "list of optimizers names objectivefunc : list The list of", "True: ExportToFile = results_directory + \"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\")", "Iter) elif algo == \"JAYA\": x = jaya.JAYA(getattr(benchmarks, function_name), lb,", "Iter) elif algo == \"CS\": x = cs.CS(getattr(benchmarks, function_name), lb,", "== True: ExportToFile = results_directory + \"experiment.csv\" with open(ExportToFile, \"a\",", "optimizers.GWO as gwo import optimizers.MFO as mfo import optimizers.CS as", "the experiments. Parameters ---------- optimizer : list The list of", "The list of benchmark functions NumOfRuns : int The number", "experiments. Parameters ---------- optimizer : list The list of optimizers", "params[\"PopulationSize\"] Iterations = params[\"Iterations\"] # Export results ? Export =", "[0] * NumOfRuns for k in range(0, NumOfRuns): func_details =", "to write the header of the CSV file header =", "objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close() if Export_convergence == True:", "(Exporting the detailed results in files) 3. Export_convergence (Exporting the", "* NumOfRuns for k in range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j])", "= export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag = False Flag_details =", "of Boolean flags which are: 1. Export (Exporting the results", "Export_convergence (Exporting the covergence plots) 4. 
Export_boxplot (Exporting the box", "import optimizers.HYBRID as hybrid import benchmarks import csv import numpy", "for the cinvergence CnvgHeader = [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") +", "if Export_details == True: ExportToFile = results_directory + \"experiment_details.csv\" with", "= params[\"PopulationSize\"] Iterations = params[\"Iterations\"] # Export results ? Export", "Flag == False ): # just one time to write", "as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize, Iter): function_name =", "NumOfRuns executionTime = [0] * NumOfRuns for k in range(0,", "lb, ub, dim, popSize, Iter) elif algo == \"CS\": x", "= ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "Iterations) if Export_boxplot == True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if", "params, export_flags): \"\"\" It serves as the main interface of", "results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l in", "if Flag == False: # Faild to run at least", "lb, ub, dim, popSize, Iter) elif algo == \"SCA\": x", "for j in range(0, len(objectivefunc)): convergence = [0] * NumOfRuns", "import optimizers.MVO as mvo import optimizers.GWO as gwo import optimizers.MFO", "= bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "Iterations) convergence[k] = x.convergence optimizerName = x.optimizer objfname = x.objfname", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"MVO\":", "of parameters which are: 1. Size of population (PopulationSize) 2.", "pathlib import Path import optimizers.PSO as pso import optimizers.MVO as", "the detailed results in files) 3. 
Export_convergence (Exporting the covergence", "at least one experiment executionTime[k] = x.executionTime a = numpy.concatenate(", "dim, popSize, Iter) elif algo == \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks,", "woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close() if Export_convergence == True: conv_plot.run(results_directory,", "warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize, Iter): function_name = func_details[0] lb", "ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", ") writer.writerow(header) Flag = True avgExecutionTime = float(\"%0.2f\" % (sum(executionTime)", "? Export = export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"]", "header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag", "Iterations) if Flag == False: # Faild to run at", "/ NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist()", "Export_details == True: ExportToFile = results_directory + \"experiment_details.csv\" with open(ExportToFile,", "\"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer = csv.writer(out,", "popSize, Iter) elif algo == \"WOA\": x = woa.WOA(getattr(benchmarks, function_name),", "optimizers names objectivefunc : list The list of benchmark functions", "elif algo == \"SCA\": x = sca.SCA(getattr(benchmarks, function_name), lb, ub,", "The set of Boolean flags which are: 1. 
Export (Exporting", "Export == True: ExportToFile = results_directory + \"experiment.csv\" with open(ExportToFile,", "lb, ub, dim, popSize, Iter) elif algo == \"FFA\": x", "== \"BAT\": x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "algo == \"PSO\": x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim,", "\"WOA\": x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "+ 1)) for i in range(0, len(optimizer)): for j in", "import optimizers.GWO as gwo import optimizers.MFO as mfo import optimizers.CS", "\"MVO\": x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "def selector(algo, func_details, popSize, Iter): function_name = func_details[0] lb =", "woa import optimizers.FFA as ffa import optimizers.SSA as ssa import", "lb = func_details[1] ub = func_details[2] dim = func_details[3] if", "popSize, Iter) elif algo == \"GWO\": x = gwo.GWO(getattr(benchmarks, function_name),", "time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l in range(0, Iterations):", "+ str(l + 1)) for i in range(0, len(optimizer)): for", "objectivefunc, Iterations) if Export_boxplot == True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations)", "params[\"Iterations\"] # Export results ? 
Export = export_flags[\"Export_avg\"] Export_details =", "= True avgExecutionTime = float(\"%0.2f\" % (sum(executionTime) / NumOfRuns)) avgConvergence", "objectivefunc : list The list of benchmark functions NumOfRuns :", "float(\"%0.2f\" % (sum(executionTime) / NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence, axis=0,", "= [0] * NumOfRuns for k in range(0, NumOfRuns): func_details", "else: return null return x def run(optimizer, objectivefunc, NumOfRuns, params,", "\"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag = True avgExecutionTime =", "mfo import optimizers.CS as cs import optimizers.BAT as bat import", "axis=0, dtype=numpy.float64), decimals=2 ).tolist() a = numpy.concatenate( [[optimizerName, objfname, avgExecutionTime],", "False ): # just one time to write the header", "out.close() if Export_convergence == True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations) if", "csv import numpy import time import warnings import os import", "CnvgHeader = [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True)", "set The set of parameters which are: 1. Size of", "as out: writer = csv.writer(out, delimiter=\",\") if ( Flag_details ==", "newline=\"\\n\") as out: writer = csv.writer(out, delimiter=\",\") if ( Flag_details", "avgConvergence = numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist() a =", "Size of population (PopulationSize) 2. 
The number of iterations (Iterations)", "It serves as the main interface of the framework for", "NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details, PopulationSize, Iterations)", "executionTime[k] = x.executionTime a = numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence]", "optimizers.MVO as mvo import optimizers.GWO as gwo import optimizers.MFO as", "Flag_details == False ): # just one time to write", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"CS\":", "for i in range(0, len(optimizer)): for j in range(0, len(objectivefunc)):", "NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist() a", "popSize, Iter) elif algo == \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name),", "numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist() a = numpy.concatenate( [[optimizerName,", "Iter) elif algo == \"GWO\": x = gwo.GWO(getattr(benchmarks, function_name), lb,", "elif algo == \"HHO\": x = hho.HHO(getattr(benchmarks, function_name), lb, ub,", "plot_convergence as conv_plot import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo,", "import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize, Iter):", "for running the experiments. Parameters ---------- optimizer : list The", "len(optimizer)): for j in range(0, len(objectivefunc)): convergence = [0] *", "pso import optimizers.MVO as mvo import optimizers.GWO as gwo import", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"HYBRID\":", "The number of iterations (Iterations) export_flags : set The set", "True: ExportToFile = results_directory + \"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\")", "Parameters ---------- optimizer : list The list of optimizers names", "is selected. 
Check lists of available optimizers and cost functions\"", "import os import plot_convergence as conv_plot import plot_boxplot as box_plot", "== \"WOA\": x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", ": int The number of independent runs params : set", "iterations (Iterations) export_flags : set The set of Boolean flags", "= numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence] ) writer.writerow(a) out.close() if", "exist_ok=True) for l in range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l +", "import optimizers.GA as ga import optimizers.HHO as hho import optimizers.SCA", "import optimizers.BAT as bat import optimizers.WOA as woa import optimizers.FFA", "dim, popSize, Iter) elif algo == \"BAT\": x = bat.BAT(getattr(benchmarks,", "\"HHO\": x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "one experiment print( \"No Optomizer or Cost function is selected.", "plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize, Iter): function_name", "serves as the main interface of the framework for running", "header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details", "of the framework for running the experiments. 
Parameters ---------- optimizer", "\"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details = True # at", "out.close() if Export == True: ExportToFile = results_directory + \"experiment.csv\"", "as hho import optimizers.SCA as sca import optimizers.JAYA as jaya", "= selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k] = x.convergence optimizerName =", "Flag == False: # Faild to run at least one", "import csv import numpy import time import warnings import os", "x.optimizer objfname = x.objfname if Export_details == True: ExportToFile =", "as ga import optimizers.HHO as hho import optimizers.SCA as sca", "x.objfname if Export_details == True: ExportToFile = results_directory + \"experiment_details.csv\"", "parameters which are: 1. Size of population (PopulationSize) 2. The", "\"SCA\": x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "in a file) 2. Export_details (Exporting the detailed results in", "of benchmark functions NumOfRuns : int The number of independent", "= float(\"%0.2f\" % (sum(executionTime) / NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence,", "ub, dim, popSize, Iter) elif algo == \"GWO\": x =", "Export_boxplot = export_flags[\"Export_boxplot\"] Flag = False Flag_details = False #", "False: # Faild to run at least one experiment print(", "cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "parameters for all optimizers (population size, number of iterations) ....", "func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k]", "for all optimizers (population size, number of iterations) .... PopulationSize", "2. The number of iterations (Iterations) export_flags : set The", "which are: 1. 
Export (Exporting the results in a file)", "str(l + 1)) for i in range(0, len(optimizer)): for j", "x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "the framework for running the experiments. Parameters ---------- optimizer :", "int The number of independent runs params : set The", "4. Export_boxplot (Exporting the box plots) Returns ----------- N/A \"\"\"", "optimizers.HYBRID as hybrid import benchmarks import csv import numpy import", "dim, popSize, Iter) elif algo == \"FFA\": x = ffa.FFA(getattr(benchmarks,", "= ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "are: 1. Size of population (PopulationSize) 2. The number of", "Export_boxplot (Exporting the box plots) Returns ----------- N/A \"\"\" #", "popSize, Iter) elif algo == \"MVO\": x = mvo.MVO(getattr(benchmarks, function_name),", "Path(results_directory).mkdir(parents=True, exist_ok=True) for l in range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l", "popSize, Iter) elif algo == \"FFA\": x = ffa.FFA(getattr(benchmarks, function_name),", "as gwo import optimizers.MFO as mfo import optimizers.CS as cs", "Iter): function_name = func_details[0] lb = func_details[1] ub = func_details[2]", "dim, popSize, Iter) elif algo == \"PSO\": x = pso.PSO(getattr(benchmarks,", "[[optimizerName, objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close() if Export_convergence ==", "as mfo import optimizers.CS as cs import optimizers.BAT as bat", "runs params : set The set of parameters which are:", "ub, dim, popSize, Iter) elif algo == \"WOA\": x =", "set The set of Boolean flags which are: 1. 
Export", "\"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l in range(0, Iterations): CnvgHeader.append(\"Iter\" +", "== \"MFO\": x = mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "for l in range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l + 1))", "optimizers.HHO as hho import optimizers.SCA as sca import optimizers.JAYA as", "gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "= [0] * NumOfRuns executionTime = [0] * NumOfRuns for", "== \"MVO\": x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "elif algo == \"WOA\": x = woa.WOA(getattr(benchmarks, function_name), lb, ub,", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"WOA\":", "NumOfRuns : int The number of independent runs params :", "popSize, Iter) elif algo == \"BAT\": x = bat.BAT(getattr(benchmarks, function_name),", "CSV Header for for the cinvergence CnvgHeader = [] results_directory", "* NumOfRuns executionTime = [0] * NumOfRuns for k in", "import Path import optimizers.PSO as pso import optimizers.MVO as mvo", "hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) else: return null", "\"FFA\": x = ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "ga import optimizers.HHO as hho import optimizers.SCA as sca import", "ub, dim, popSize, Iter) elif algo == \"SCA\": x =", "Select general parameters for all optimizers (population size, number of", "one experiment executionTime[k] = x.executionTime a = numpy.concatenate( [[x.optimizer, x.objfname,", "convergence[k] = x.convergence optimizerName = x.optimizer objfname = x.objfname if", "file header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header)", "running the experiments. 
Parameters ---------- optimizer : list The list", "hybrid import benchmarks import csv import numpy import time import", "just one time to write the header of the CSV", "Iter) elif algo == \"MFO\": x = mfo.MFO(getattr(benchmarks, function_name), lb,", "x def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): \"\"\" It serves", "import optimizers.FFA as ffa import optimizers.SSA as ssa import optimizers.GA", "writer = csv.writer(out, delimiter=\",\") if ( Flag_details == False ):", "elif algo == \"PSO\": x = pso.PSO(getattr(benchmarks, function_name), lb, ub,", "a file) 2. Export_details (Exporting the detailed results in files)", "The set of parameters which are: 1. Size of population", "x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "= func_details[3] if algo == \"SSA\": x = ssa.SSA(getattr(benchmarks, function_name),", "= hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "Iter) else: return null return x def run(optimizer, objectivefunc, NumOfRuns,", "= export_flags[\"Export_boxplot\"] Flag = False Flag_details = False # CSV", "or Cost function is selected. Check lists of available optimizers", "of the CSV file header = numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"],", "optimizers.PSO as pso import optimizers.MVO as mvo import optimizers.GWO as", "= x.optimizer objfname = x.objfname if Export_details == True: ExportToFile", "os import plot_convergence as conv_plot import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\")", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"SCA\":", "the header of the CSV file header = numpy.concatenate( [[\"Optimizer\",", "ub, dim, popSize, Iter) elif algo == \"MVO\": x =", "the box plots) Returns ----------- N/A \"\"\" # Select general", "elif algo == \"MFO\": x = mfo.MFO(getattr(benchmarks, function_name), lb, ub,", "3. Export_convergence (Exporting the covergence plots) 4. 
Export_boxplot (Exporting the", "k in range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i],", "optimizers.CS as cs import optimizers.BAT as bat import optimizers.WOA as", "in range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details,", "== True: ExportToFile = results_directory + \"experiment_details.csv\" with open(ExportToFile, \"a\",", "= export_flags[\"Export_details\"] Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag =", "if ( Flag == False ): # just one time", "import optimizers.MFO as mfo import optimizers.CS as cs import optimizers.BAT", "CnvgHeader] ) writer.writerow(header) Flag = True avgExecutionTime = float(\"%0.2f\" %", "== \"GWO\": x = gwo.GWO(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "popSize, Iter) else: return null return x def run(optimizer, objectivefunc,", "\"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer = csv.writer(out,", "(Iterations) export_flags : set The set of Boolean flags which", "= True # at least one experiment executionTime[k] = x.executionTime", "optimizers.WOA as woa import optimizers.FFA as ffa import optimizers.SSA as", "# at least one experiment executionTime[k] = x.executionTime a =", "objfname = x.objfname if Export_details == True: ExportToFile = results_directory", "import optimizers.WOA as woa import optimizers.FFA as ffa import optimizers.SSA", "(population size, number of iterations) .... 
PopulationSize = params[\"PopulationSize\"] Iterations", "hho import optimizers.SCA as sca import optimizers.JAYA as jaya import", "ub, dim, popSize, Iter) elif algo == \"BAT\": x =", "= time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for l in range(0,", "algo == \"WOA\": x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim,", "l in range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l + 1)) for", "= [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True, exist_ok=True) for", "mvo import optimizers.GWO as gwo import optimizers.MFO as mfo import", "number of independent runs params : set The set of", "function_name), lb, ub, dim, popSize, Iter) else: return null return", "in range(0, Iterations): CnvgHeader.append(\"Iter\" + str(l + 1)) for i", ").tolist() a = numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a)", "\"SSA\": x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "range(0, len(objectivefunc)): convergence = [0] * NumOfRuns executionTime = [0]", "= numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2 ).tolist() a = numpy.concatenate(", "dim, popSize, Iter) else: return null return x def run(optimizer,", "import optimizers.SSA as ssa import optimizers.GA as ga import optimizers.HHO", "as the main interface of the framework for running the", "x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) else:", "algo == \"BAT\": x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim,", "Check lists of available optimizers and cost functions\" ) print(\"Execution", "executionTime = [0] * NumOfRuns for k in range(0, NumOfRuns):", "= func_details[1] ub = func_details[2] dim = func_details[3] if algo", ".... 
PopulationSize = params[\"PopulationSize\"] Iterations = params[\"Iterations\"] # Export results", "ub, dim, popSize, Iter) elif algo == \"FFA\": x =", "optimizer, objectivefunc, Iterations) if Export_boxplot == True: box_plot.run(results_directory, optimizer, objectivefunc,", "a = numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence] ) writer.writerow(a) out.close()", "hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "ub, dim, popSize, Iter) elif algo == \"HHO\": x =", "== \"HHO\": x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize,", ": list The list of benchmark functions NumOfRuns : int", "to run at least one experiment print( \"No Optomizer or", "= cs.CS(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "= False Flag_details = False # CSV Header for for", "open(ExportToFile, \"a\", newline=\"\\n\") as out: writer = csv.writer(out, delimiter=\",\") if", "= numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag =", "ub, dim, popSize, Iter) elif algo == \"GA\": x =", "run at least one experiment print( \"No Optomizer or Cost", "in range(0, len(objectivefunc)): convergence = [0] * NumOfRuns executionTime =", "Export_boxplot == True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Flag ==", "( Flag == False ): # just one time to", "lb, ub, dim, popSize, Iter) elif algo == \"PSO\": x", "elif algo == \"GA\": x = ga.GA(getattr(benchmarks, function_name), lb, ub,", "import time import warnings import os import plot_convergence as conv_plot", "1. Export (Exporting the results in a file) 2. Export_details", "as woa import optimizers.FFA as ffa import optimizers.SSA as ssa", "ub, dim, popSize, Iter) elif algo == \"JAYA\": x =", "ffa import optimizers.SSA as ssa import optimizers.GA as ga import", "detailed results in files) 3. 
Export_convergence (Exporting the covergence plots)", "algo == \"JAYA\": x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim,", "= jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "at least one experiment print( \"No Optomizer or Cost function", "are: 1. Export (Exporting the results in a file) 2.", "optimizers.FFA as ffa import optimizers.SSA as ssa import optimizers.GA as", "elif algo == \"GWO\": x = gwo.GWO(getattr(benchmarks, function_name), lb, ub,", "= params[\"Iterations\"] # Export results ? Export = export_flags[\"Export_avg\"] Export_details", "of optimizers names objectivefunc : list The list of benchmark", "x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "= numpy.concatenate( [[optimizerName, objfname, avgExecutionTime], avgConvergence] ) writer.writerow(a) out.close() if", "dim, popSize, Iter) elif algo == \"CS\": x = cs.CS(getattr(benchmarks,", "Export_convergence = export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag = False Flag_details", "CnvgHeader] ) writer.writerow(header) Flag_details = True # at least one", "elif algo == \"MVO\": x = mvo.MVO(getattr(benchmarks, function_name), lb, ub,", "dim, popSize, Iter) elif algo == \"MVO\": x = mvo.MVO(getattr(benchmarks,", "newline=\"\\n\") as out: writer = csv.writer(out, delimiter=\",\") if ( Flag", "(sum(executionTime) / NumOfRuns)) avgConvergence = numpy.around( numpy.mean(convergence, axis=0, dtype=numpy.float64), decimals=2", "interface of the framework for running the experiments. Parameters ----------", "covergence plots) 4. Export_boxplot (Exporting the box plots) Returns -----------", "which are: 1. Size of population (PopulationSize) 2. 
The number", "functions NumOfRuns : int The number of independent runs params", "jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "results_directory + \"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer", "Boolean flags which are: 1. Export (Exporting the results in", "mfo.MFO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "Iter) elif algo == \"SCA\": x = sca.SCA(getattr(benchmarks, function_name), lb,", "optimizers.MFO as mfo import optimizers.CS as cs import optimizers.BAT as", "writer.writerow(a) out.close() if Export_convergence == True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations)", "1. Size of population (PopulationSize) 2. The number of iterations", "\"BAT\": x = bat.BAT(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "dim, popSize, Iter) elif algo == \"SCA\": x = sca.SCA(getattr(benchmarks,", "import benchmarks import csv import numpy import time import warnings", ": set The set of parameters which are: 1. Size", "conv_plot import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def selector(algo, func_details, popSize,", "\"No Optomizer or Cost function is selected. Check lists of", "2. Export_details (Exporting the detailed results in files) 3. Export_convergence", "params : set The set of parameters which are: 1.", "True avgExecutionTime = float(\"%0.2f\" % (sum(executionTime) / NumOfRuns)) avgConvergence =", "in files) 3. Export_convergence (Exporting the covergence plots) 4. 
Export_boxplot", "if Export_boxplot == True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Flag", "---------- optimizer : list The list of optimizers names objectivefunc", "# CSV Header for for the cinvergence CnvgHeader = []", "x.objfname, x.executionTime], x.convergence] ) writer.writerow(a) out.close() if Export == True:", "popSize, Iter) elif algo == \"CS\": x = cs.CS(getattr(benchmarks, function_name),", "bat import optimizers.WOA as woa import optimizers.FFA as ffa import", "def run(optimizer, objectivefunc, NumOfRuns, params, export_flags): \"\"\" It serves as", "== True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Export_boxplot == True:", "set of Boolean flags which are: 1. Export (Exporting the", "sca import optimizers.JAYA as jaya import optimizers.HYBRID as hybrid import", "results_directory + \"experiment_details.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer", ") writer.writerow(header) Flag_details = True # at least one experiment", "writer.writerow(a) out.close() if Export == True: ExportToFile = results_directory +", "delimiter=\",\") if ( Flag == False ): # just one", "if Export == True: ExportToFile = results_directory + \"experiment.csv\" with", "numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details = True", "Export_convergence == True: conv_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Export_boxplot ==", "x = hho.HHO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "elif algo == \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub,", "the results in a file) 2. 
Export_details (Exporting the detailed", "func_details[2] dim = func_details[3] if algo == \"SSA\": x =", "export_flags[\"Export_convergence\"] Export_boxplot = export_flags[\"Export_boxplot\"] Flag = False Flag_details = False", "of independent runs params : set The set of parameters", "\"GA\": x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "# Export results ? Export = export_flags[\"Export_avg\"] Export_details = export_flags[\"Export_details\"]", "function_name = func_details[0] lb = func_details[1] ub = func_details[2] dim", "list The list of benchmark functions NumOfRuns : int The", "dim, popSize, Iter) elif algo == \"WOA\": x = woa.WOA(getattr(benchmarks,", "Returns ----------- N/A \"\"\" # Select general parameters for all", "Iterations): CnvgHeader.append(\"Iter\" + str(l + 1)) for i in range(0,", "algo == \"MVO\": x = mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim,", "x = sca.SCA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "mvo.MVO(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "N/A \"\"\" # Select general parameters for all optimizers (population", "algo == \"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim,", "= func_details[0] lb = func_details[1] ub = func_details[2] dim =", "j in range(0, len(objectivefunc)): convergence = [0] * NumOfRuns executionTime", "<reponame>thanusha22/CEC-1 from pathlib import Path import optimizers.PSO as pso import", "= False # CSV Header for for the cinvergence CnvgHeader", "popSize, Iter) elif algo == \"GA\": x = ga.GA(getattr(benchmarks, function_name),", "set of parameters which are: 1. 
Size of population (PopulationSize)", "with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer = csv.writer(out, delimiter=\",\")", "from pathlib import Path import optimizers.PSO as pso import optimizers.MVO", "x.executionTime], x.convergence] ) writer.writerow(a) out.close() if Export == True: ExportToFile", "ub = func_details[2] dim = func_details[3] if algo == \"SSA\":", "optimizer, objectivefunc, Iterations) if Flag == False: # Faild to", "lb, ub, dim, popSize, Iter) elif algo == \"JAYA\": x", "+ \"experiment.csv\" with open(ExportToFile, \"a\", newline=\"\\n\") as out: writer =", "optimizers.SCA as sca import optimizers.JAYA as jaya import optimizers.HYBRID as", "= ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo", "export_flags[\"Export_boxplot\"] Flag = False Flag_details = False # CSV Header", "== False: # Faild to run at least one experiment", "print( \"No Optomizer or Cost function is selected. Check lists", "= csv.writer(out, delimiter=\",\") if ( Flag_details == False ): #", "== True: box_plot.run(results_directory, optimizer, objectivefunc, Iterations) if Flag == False:", "\"\"\" It serves as the main interface of the framework", "algo == \"SSA\": x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim,", "function_name), lb, ub, dim, popSize, Iter) elif algo == \"GA\":", "Export (Exporting the results in a file) 2. 
Export_details (Exporting", "x = woa.WOA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "import warnings import os import plot_convergence as conv_plot import plot_boxplot", "as jaya import optimizers.HYBRID as hybrid import benchmarks import csv", "x = jaya.JAYA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif", "[[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag_details = True #", "lists of available optimizers and cost functions\" ) print(\"Execution completed\")", "dim, popSize, Iter) elif algo == \"MFO\": x = mfo.MFO(getattr(benchmarks,", "ub, dim, popSize, Iter) elif algo == \"HYBRID\": x =", "numpy.concatenate( [[\"Optimizer\", \"objfname\", \"ExecutionTime\"], CnvgHeader] ) writer.writerow(header) Flag = True", "NumOfRuns for k in range(0, NumOfRuns): func_details = benchmarks.getFunctionDetails(objectivefunc[j]) x", "== \"PSO\": x = pso.PSO(getattr(benchmarks, function_name), lb, ub, dim, popSize,", ") writer.writerow(a) out.close() if Export == True: ExportToFile = results_directory", "ffa.FFA(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter) elif algo ==", "Iter) elif algo == \"MVO\": x = mvo.MVO(getattr(benchmarks, function_name), lb,", "== \"SSA\": x = ssa.SSA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "== \"GA\": x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim, popSize,", "of iterations (Iterations) export_flags : set The set of Boolean", "writer = csv.writer(out, delimiter=\",\") if ( Flag == False ):", "experiment print( \"No Optomizer or Cost function is selected. 
Check", "box plots) Returns ----------- N/A \"\"\" # Select general parameters", "\"HYBRID\": x = hybrid.HYBRID(getattr(benchmarks, function_name), lb, ub, dim, popSize, Iter)", "x.executionTime a = numpy.concatenate( [[x.optimizer, x.objfname, x.executionTime], x.convergence] ) writer.writerow(a)", "cinvergence CnvgHeader = [] results_directory = time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \"/\" Path(results_directory).mkdir(parents=True,", "= benchmarks.getFunctionDetails(objectivefunc[j]) x = selector(optimizer[i], func_details, PopulationSize, Iterations) convergence[k] =", "Cost function is selected. Check lists of available optimizers and", "algo == \"GA\": x = ga.GA(getattr(benchmarks, function_name), lb, ub, dim,", "import plot_convergence as conv_plot import plot_boxplot as box_plot warnings.simplefilter(action=\"ignore\") def" ]
[ "import fields from . import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type):", "= [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\": avro_type, \"default\": fields.NULL}", "\"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name =", "name = \"a_field\" field = fields.Field(name, primitive_type, dataclasses.MISSING) avro_type =", "default type. Default should be {primitive_type}\" with pytest.raises(AssertionError, match=msg): field.to_dict()", "\"default\": default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type,", "fields.Field(name, primitive_type, invalid_default) msg = f\"Invalid default type. Default should", "default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default):", "\"a_field\" field = fields.Field(name, primitive_type, None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]]", "= [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name, \"type\": avro_type, \"default\": default}", "= fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name, \"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\",", "test_primitive_types(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type, dataclasses.MISSING) avro_type", "= \"a_field\" field = fields.Field(name, primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type],", "primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": 
name, \"type\": avro_type}", "\"a_field\" field = fields.Field(name, primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL]", "{\"name\": name, \"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type):", "default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name, \"type\": avro_type,", "test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type, None) avro_type", "{\"name\": name, \"type\": avro_type, \"default\": default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\",", "fields from . import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name", "primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name, \"type\":", "import pytest from dataclasses_avroschema import fields from . import consts", ") def test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\" field = fields.Field(name,", "test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\" field = fields.Field(name, primitive_type, invalid_default)", "fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name =", "pytest from dataclasses_avroschema import fields from . 
import consts @pytest.mark.parametrize(\"primitive_type\",", "field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\" field", "= fields.Field(name, primitive_type, invalid_default) msg = f\"Invalid default type. Default", "avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\": avro_type, \"default\":", "\"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\" field", "== field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default): name", "\"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name", "from . import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name =", ". import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name = \"a_field\"", "test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\" field = fields.Field(name, primitive_type, default)", "field = fields.Field(name, primitive_type, invalid_default) msg = f\"Invalid default type.", "avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name, \"type\": avro_type, \"default\":", "dataclasses import pytest from dataclasses_avroschema import fields from . 
import", "def test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\" field = fields.Field(name, primitive_type,", "avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name, \"type\": avro_type} == field.to_dict()", "None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\": avro_type,", "fields.Field(name, primitive_type, None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name,", "consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name = \"a_field\" field =", "invalid_default): name = \"a_field\" field = fields.Field(name, primitive_type, invalid_default) msg", "= fields.Field(name, primitive_type, None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\":", "def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type, None)", "fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name, \"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES)", "primitive_type, invalid_default) msg = f\"Invalid default type. Default should be", "consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\" field = fields.Field(name,", "[fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name, \"type\": avro_type, \"default\": default} ==", "field = fields.Field(name, primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\":", "f\"Invalid default type. 
Default should be {primitive_type}\" with pytest.raises(AssertionError, match=msg):", "consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\" field =", "@pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field = fields.Field(name,", "= fields.Field(name, primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name,", "field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default): name =", "@pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\" field =", "fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type,", "def test_primitive_types(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type, dataclasses.MISSING)", "name, \"type\": avro_type, \"default\": default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS", "= \"a_field\" field = fields.Field(name, primitive_type, invalid_default) msg = f\"Invalid", "field = fields.Field(name, primitive_type, None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert", "[fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\": avro_type, \"default\": fields.NULL} ==", "default): name = \"a_field\" field = fields.Field(name, primitive_type, default) avro_type", "= \"a_field\" field = fields.Field(name, primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type]", "avro_type} == 
field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\"", "primitive_type, None) avro_type = [fields.NULL, fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\":", "fields.NULL] assert {\"name\": name, \"type\": avro_type, \"default\": default} == field.to_dict()", "name, \"type\": avro_type, \"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def", "fields.Field(name, primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\": name,", "@pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name = \"a_field\" field = fields.Field(name,", "fields.PYTHON_TYPE_TO_AVRO[primitive_type]] assert {\"name\": name, \"type\": avro_type, \"default\": fields.NULL} == field.to_dict()", "invalid_default) msg = f\"Invalid default type. Default should be {primitive_type}\"", "\"type\": avro_type, \"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type,", "= fields.Field(name, primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert {\"name\":", "\"a_field\" field = fields.Field(name, primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert", "name = \"a_field\" field = fields.Field(name, primitive_type, None) avro_type =", "import dataclasses import pytest from dataclasses_avroschema import fields from .", "= f\"Invalid default type. Default should be {primitive_type}\" with pytest.raises(AssertionError,", "dataclasses_avroschema import fields from . 
import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def", "fields.Field(name, primitive_type, dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name, \"type\":", "avro_type, \"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default):", "fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field = fields.Field(name, primitive_type,", "== field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS) def test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\"", "def test_primitive_types_with_default_value(primitive_type, default): name = \"a_field\" field = fields.Field(name, primitive_type,", "avro_type, \"default\": default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def", "assert {\"name\": name, \"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def", "field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field =", "name = \"a_field\" field = fields.Field(name, primitive_type, invalid_default) msg =", "\"type\": avro_type, \"default\": default} == field.to_dict() @pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS )", "= \"a_field\" field = fields.Field(name, primitive_type, None) avro_type = [fields.NULL,", "field = fields.Field(name, primitive_type, default) avro_type = [fields.PYTHON_TYPE_TO_AVRO[primitive_type], fields.NULL] assert", "msg = f\"Invalid default type. 
Default should be {primitive_type}\" with", "@pytest.mark.parametrize( \"primitive_type,invalid_default\", consts.PRIMITIVE_TYPES_AND_INVALID_DEFAULTS ) def test_invalid_default_values(primitive_type, invalid_default): name = \"a_field\"", "assert {\"name\": name, \"type\": avro_type, \"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\",", "import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types(primitive_type): name = \"a_field\" field", "from dataclasses_avroschema import fields from . import consts @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES)", "dataclasses.MISSING) avro_type = fields.PYTHON_TYPE_TO_AVRO[primitive_type] assert {\"name\": name, \"type\": avro_type} ==", "name = \"a_field\" field = fields.Field(name, primitive_type, default) avro_type =", "assert {\"name\": name, \"type\": avro_type, \"default\": default} == field.to_dict() @pytest.mark.parametrize(", "\"a_field\" field = fields.Field(name, primitive_type, invalid_default) msg = f\"Invalid default", "== field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name = \"a_field\" field", "name, \"type\": avro_type} == field.to_dict() @pytest.mark.parametrize(\"primitive_type\", fields.PYTHON_INMUTABLE_TYPES) def test_primitive_types_with_default_value_none(primitive_type): name", "{\"name\": name, \"type\": avro_type, \"default\": fields.NULL} == field.to_dict() @pytest.mark.parametrize(\"primitive_type,default\", consts.PRIMITIVE_TYPES_AND_DEFAULTS)" ]
[ "desired # end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost)", "track provided state data. stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort", "reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\")", "1 kg, center of mass at the body's # origin,", "to a setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the", "0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100,", "trajectories from the optimal trajectory. \"\"\" visualize = True #", "for state in statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1 =", "osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def", "= osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0))", "# # Copyright (c) 2018 Stanford University and the Authors", "guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\",", "solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model =", "model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 =", "return solution optimalTrajectory = solvePrediction() markersRef = 
computeMarkersReference(optimalTrajectory) trackedSolution =", "[0, 0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess)", "------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\") # Create two", "Add markers to body origin locations. m0 = osim.Marker(\"m0\", b0,", "problem.addGoal(finalCost) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2)", "0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center", "problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return solution", "0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem", "os import math import opensim as osim \"\"\" This file", "solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return solution optimalTrajectory", "are name, [lower bound, upper bound], # initial [lower bound,", "of a double pendulum. # ------------------------------------ def createDoublePendulumModel(): model =", "In the diagram below, + represents the origin, and ---o", "effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the solver. solver = study.initTropterSolver()", "swing-up. 
study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem() # Model", "obtain a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0", "# import os import math import opensim as osim \"\"\"", "b0 = osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\",", "problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure", "# not use this file except in compliance with the", "100]) # Cost: track provided marker data. markerTracking = osim.MocoMarkerTrackingGoal()", "\"License\"); you may # # not use this file except", "name, [lower bound, upper bound], # initial [lower bound, upper", "study.visualize(solution) return solution optimalTrajectory = solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution", "effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal()", "solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2)", "1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict", "stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001)", "if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel()", "# Create a model of a double pendulum. 
# ------------------------------------", "= j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1,", "a model of a double pendulum. # ------------------------------------ def createDoublePendulumModel():", "solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem to a setup file", "= False # Create a model of a double pendulum.", "if visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess): # Predict", "= osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to", "j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1)", "the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\")", "# | # o # | # +---o---o + #", "10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0) problem.setControlInfo(\"/tau0\", [-100, 100])", "markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50],", "problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return solution", "a setup file for reference. 
study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem.", "osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO", "osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0", "# origin, and moments and products of inertia of zero.", "the License. You may obtain a # # copy of", "track the states from the optimal trajectory, and 3. track", "= stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50,", "the diagram below, + represents the origin, and ---o represents", "solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess()", "\"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF", "Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform =", "if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False # Create a", "marker trajectories from the optimal trajectory. \"\"\" visualize = True", "position. 
ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\")", "osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0,", "q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate())", "guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0,", "double pendulum model: 1. predict an optimal trajectory (and controls),", "= study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0,", "under the License is distributed on an \"AS IS\" BASIS,", "0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0,", "model = osim.Model() model.setName(\"double_pendulum\") # Create two links, each with", "1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to body origin", "[-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) #", "study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if", "= osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the solver.", "0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\",", "data. 
markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\")", "the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required", "the License. # # -------------------------------------------------------------------------- # import os import math", "controls), 2. track the states from the optimal trajectory, and", "osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) #", "= study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0,", "minimum time swing-up. study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem()", "return model def solvePrediction(): # Predict the optimal trajectory for", "this file except in compliance with the License. You may", "math import opensim as osim \"\"\" This file performs the", "pendulum model: 1. predict an optimal trajectory (and controls), 2.", "0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: track", "effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the", "model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model,", "problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\",", "# Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments are name,", "display geometry. 
bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5,", "Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright (c) 2018", "# -------------------------------------------------------------------------- # import os import math import opensim as", "links, each with a mass of 1 kg, center of", "study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model", "Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\")", "origin, and moments and products of inertia of zero. b0", "visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess): # Predict the", "# limitations under the License. # # -------------------------------------------------------------------------- # import", "osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef):", "# study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem() # Model", "model.addMarker(m0) model.addMarker(m1) # Connect the bodies with pin joints. Assume", "Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution)", "finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the solver. 
solver =", "m0 = osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1, osim.Vec3(0))", "2.0 (the \"License\"); you may # # not use this", "study.setName(\"double_pendulum_track\") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds.", "geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0,", "+ represents the origin, and ---o represents a link #", "solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem to a", "# | # +---o---o + # # iniital pose final", "problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150])", "tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1)", "time swing-up. study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem() #", "# Arguments are name, [lower bound, upper bound], # initial", "data. stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\")", "+ # # iniital pose final pose # study =", "# Connect the bodies with pin joints. Assume each body", "file except in compliance with the License. You may obtain", "study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states", "License. 
You may obtain a # # copy of the", "mass of 1 kg, center of mass at the body's", "b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return", "for a minimum time swing-up. study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem", "m long. j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1,", "# Cost: track provided state data. stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef))", "and 3. track the marker trajectories from the optimal trajectory.", "osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone())", "b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies with pin", "True # The following environment variable is set during automated", "0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\",", "in writing, software # # distributed under the License is", "False # Create a model of a double pendulum. #", "pendulum. # ------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\") #", "under the Apache License, Version 2.0 (the \"License\"); you may", "License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by", "osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator()", "bound, upper bound]. 
problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50],", "50], 0, 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) #", "10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\",", "tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. bodyGeometry =", "0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0, 0])", "osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the optimal trajectory for", "# OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright", "transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform)", "osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in", "markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\")", "study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess): # Predict the optimal", "-------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford University and the", "problem.setModel(createDoublePendulumModel()) # Bounds. 
# Arguments are name, [lower bound, upper", "study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the", "150]) problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost: track provided state data.", "Cost: track provided state data. stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking)", "the double pendulum. # # o # | # o", "= osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) #", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "See the License for the specific language governing permissions and", "osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate()", "problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100,", "finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost)", "b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def solvePrediction():", "center of mass at the body's # origin, and moments", "b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) #", "# See the License for the specific language governing permissions", "Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. 
# Arguments are name, [lower", "# Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize:", "the marker trajectories from the optimal trajectory. \"\"\" visualize =", "markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) #", "required by applicable law or agreed to in writing, software", "= osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0)", "osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies with pin joints.", "= osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry.", "a minimum time swing-up. # In the diagram below, +", "University and the Authors # # # # Author(s): <NAME>", "Cost: minimize final time and error from desired # end", "model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to each", "iniital pose final pose # study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem", "= study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments", "using a double pendulum model: 1. predict an optimal trajectory", "the bodies with pin joints. Assume each body is 1", "model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a", "automated testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False #", "bound], # final [lower bound, upper bound]. 
finalTime = stateRef.getIndependentColumn()[-1]", "problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost: track provided state data. stateTracking", "0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0)", "# problem.addGoal(effort) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50)", "= solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0,", "solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem to a setup", "track the marker trajectories from the optimal trajectory. \"\"\" visualize", "file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem. solution =", "and # # limitations under the License. # # --------------------------------------------------------------------------", "0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0)", "osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1", "setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem. solution", "optimal trajectory (and controls), 2. track the states from the", "the states from the optimal trajectory, and 3. track the", "= osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\", 1,", "# final [lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0)", "osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate()", "each with a mass of 1 kg, center of mass", "problem.addGoal(effort) # Configure the solver. 
solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2)", "visualize: study.visualize(solution) return solution optimalTrajectory = solvePrediction() markersRef = computeMarkersReference(optimalTrajectory)", "distributed under the License is distributed on an \"AS IS\"", "state in statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\")", "0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to a setup", "stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50],", "Authors # # # # Author(s): <NAME> # # #", "def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj", "compliance with the License. You may obtain a # #", "solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable()", "= osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel())", "markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): #", "= osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies", "[0, 5]) # Arguments are name, [lower bound, upper bound],", "study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. 
# Arguments are", "and the Authors # # # # Author(s): <NAME> #", "b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center)", "# end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost", "study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1])", "reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\")", "markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable()) trackedSolution2 = solveMarkerTracking(markersRef, trackedSolution)", "The following environment variable is set during automated testing. if", "in compliance with the License. You may obtain a #", "Save the problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\")", "# final [lower bound, upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0,", "solveStateTracking(stateRef): # Predict the optimal trajectory for a minimum time", "osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1)", "not use this file except in compliance with the License.", "tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1,", "# # distributed under the License is distributed on an", "Predict the optimal trajectory for a minimum time swing-up. 
study", "Create two links, each with a mass of 1 kg,", "[-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50,", "solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem to a setup file for", "0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\",", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "model: 1. predict an optimal trajectory (and controls), 2. track", "[0, 0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the", "for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem. solution = study.solve()", "osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\")", "predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"])", "solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the problem to a setup", "use this file except in compliance with the License. 
You", "tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator()", "solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem", "problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: track provided", "in statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(),", "Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess", "problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: track provided marker data. markerTracking", "# Save the problem to a setup file for reference.", "pin joints. Assume each body is 1 m long. j0", "solver.setGuess(guess) # Save the problem to a setup file for", "'0': visualize = False # Create a model of a", "[lower bound, upper bound]. 
finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\",", "\"/markerset/m1\"]) for state in statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1", "problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10,", "# The following environment variable is set during automated testing.", "[-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150]) #", "10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10],", "b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections()", "Predict the optimal trajectory for a minimum time swing-up. #", "Copyright (c) 2018 Stanford University and the Authors # #", "solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the", "0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: minimize", "CONDITIONS OF ANY KIND, either express or implied. # #", "with a mass of 1 kg, center of mass at", "osim \"\"\" This file performs the following problems using a", "License for the specific language governing permissions and # #", "q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0,", "for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem. 
solution = study.solve()", "# Add markers to body origin locations. m0 = osim.Marker(\"m0\",", "# Predict the optimal trajectory for a minimum time swing-up.", "Save the problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\")", "study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess):", "= osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in statesTraj: model.realizePosition(state) m0", "model def solvePrediction(): # Predict the optimal trajectory for a", "the problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") #", "ANY KIND, either express or implied. # # See the", "problem.setTimeBounds(0, [0, 5]) # Arguments are name, [lower bound, upper", "This file performs the following problems using a double pendulum", "<reponame>mcx/opensim-core # -------------------------------------------------------------------------- # # OpenSim Moco: examplePredictAndTrack.py # #", "= osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure the", "the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return", "return solution def solveMarkerTracking(markersRef, guess): # Predict the optimal trajectory", "Assume each body is 1 m long. j0 = osim.PinJoint(\"j0\",", "double pendulum. # # o # | # o #", "finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the solver. 
solver", "[-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: track provided marker", "q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0)", "the Apache License, Version 2.0 (the \"License\"); you may #", "problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100,", "swing-up. # In the diagram below, + represents the origin,", "150]) # Cost: track provided state data. stateTracking = osim.MocoStateTrackingGoal()", "OR CONDITIONS OF ANY KIND, either express or implied. #", "= osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel())", "= osim.Model() model.setName(\"double_pendulum\") # Create two links, each with a", "bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0)", "osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\")", "the License is distributed on an \"AS IS\" BASIS, #", "Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution)", "# # # # Unless required by applicable law or", "distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES", "# TODO problem.addGoal(effort) # Configure the solver. 
solver = study.initTropterSolver()", "study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return solution optimalTrajectory = solvePrediction()", "tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display", "solution optimalTrajectory = solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable())", "# Create two links, each with a mass of 1", "is 1 m long. j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0),", "def solveMarkerTracking(markersRef, guess): # Predict the optimal trajectory for a", "solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0,", "of inertia of zero. b0 = osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1))", "tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) #", "tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. 
bodyGeometry = osim.Ellipsoid(0.5,", "50], 0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost:", "# Author(s): <NAME> # # # # Licensed under the", "0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\",", "problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: minimize final time and error", "Stanford University and the Authors # # # # Author(s):", "solveMarkerTracking(markersRef, guess): # Predict the optimal trajectory for a minimum", "0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0,", "guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to a setup file", "model.addMarker(m1) # Connect the bodies with pin joints. Assume each", "joints. Assume each body is 1 m long. j0 =", "b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\") j1", "= model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign", "model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1", "# # OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # #", "Bounds. 
# Arguments are name, [lower bound, upper bound], #", "transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone())", "study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem() # Model (dynamics).", "track provided marker data. markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort", "osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to body origin locations.", "trajectory. \"\"\" visualize = True # The following environment variable", "50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0)", "0)) problem.addGoal(finalCost) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100)", "solvePrediction(): # Predict the optimal trajectory for a minimum time", "pose final pose # study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem =", "solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") #", "m1 = osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the", "= j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 = osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\")", "markerWeights) def solveStateTracking(stateRef): # Predict the optimal trajectory for a", "osim.Model() model.setName(\"double_pendulum\") # Create two links, each with a mass", "final [lower bound, upper bound]. 
finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime)", "# -------------------------------------------------------------------------- # # OpenSim Moco: examplePredictAndTrack.py # # --------------------------------------------------------------------------", "osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add display geometry. bodyGeometry", "problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100])", "study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0, 5])", "Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution)", "= osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1,", "# o # | # o # | # +---o---o", "solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\",", "upper bound], # final [lower bound, upper bound]. finalTime =", "weight to each marker. 
markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\",", "# # iniital pose final pose # study = osim.MocoStudy()", "= model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to", "optimalTrajectory = solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable()) trackedSolution2", "0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center =", "upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0,", "= osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model", "Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0, 5]) # Arguments", "bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0)", "solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the problem to", "each body is 1 m long. j0 = osim.PinJoint(\"j0\", model.getGround(),", "\"\"\" visualize = True # The following environment variable is", "# Solve the problem. 
solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize:", "createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\") # Create two links, each", "below, + represents the origin, and ---o represents a link", "osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform)", "states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3()", "body is 1 m long. j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0),", "to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the", "KIND, either express or implied. # # See the License", "specific language governing permissions and # # limitations under the", "optimal trajectory, and 3. track the marker trajectories from the", "a weight to each marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1))", "problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150,", "IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "# o # | # +---o---o + # # iniital", "[-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10,", "upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10],", "governing permissions and # # limitations under the License. #", "bodies with pin joints. Assume each body is 1 m", "(dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0, 5]) # Arguments are", "pendulum. 
# # o # | # o # |", "the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") guess =", "solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the problem to a", "tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1)", "inertia of zero. b0 = osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0)", "[-100, 100]) # Cost: track provided marker data. markerTracking =", "long. j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0,", "[0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to a", "# iniital pose final pose # study = osim.MocoStudy() study.setName(\"double_pendulum_predict\")", "50], 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost:", "and error from desired # end effector position. ftCost =", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "# # -------------------------------------------------------------------------- # import os import math import opensim", "diagram below, + represents the origin, and ---o represents a", "model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate()) tau1.setName(\"tau1\") tau1.setOptimalForce(1) model.addComponent(tau1) # Add", "upper bound], # initial [lower bound, upper bound], # final", "osim.MocoStudy() study.setName(\"double_pendulum_track\") problem = study.updProblem() # Model (dynamics). 
problem.setModel(createDoublePendulumModel()) #", "osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1)", "upper bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10],", "= osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\")", "+---o---o + # # iniital pose final pose # study", "osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\", 1, osim.Vec3(0),", "# # Licensed under the Apache License, Version 2.0 (the", "solution def solveMarkerTracking(markersRef, guess): # Predict the optimal trajectory for", "time swing-up. # In the diagram below, + represents the", "problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0) problem.setControlInfo(\"/tau0\",", "is distributed on an \"AS IS\" BASIS, # # WITHOUT", "import os import math import opensim as osim \"\"\" This", "b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def solvePrediction(): # Predict the", "minimize final time and error from desired # end effector", "the Authors # # # # Author(s): <NAME> # #", "b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center) b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\",", "a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem.", "set during automated testing. 
if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize =", "the origin, and ---o represents a link # in the", "for the specific language governing permissions and # # limitations", "50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50],", "import math import opensim as osim \"\"\" This file performs", "# Copyright (c) 2018 Stanford University and the Authors #", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "zero. b0 = osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 =", "guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save", "each marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return", "100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: track provided marker data.", "error from desired # end effector position. ftCost = osim.MocoFinalTimeGoal()", "bound, upper bound], # final [lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\",", "m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight", "problem. 
solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return solution", "represents the origin, and ---o represents a link # in", "createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories", "markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in statesTraj: model.realizePosition(state)", "study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if", "osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center =", "# # # Licensed under the Apache License, Version 2.0", "opensim as osim \"\"\" This file performs the following problems", "2, 0)) problem.addGoal(finalCost) # Configure the solver. solver = study.initTropterSolver()", "Arguments are name, [lower bound, upper bound], # initial [lower", "to in writing, software # # distributed under the License", "[0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0,", "b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0)", "visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem()", "[lower bound, upper bound], # initial [lower bound, upper bound],", "TODO problem.addGoal(effort) # Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50)", "locations. 
m0 = osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1,", "= osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state", "markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights)", "implied. # # See the License for the specific language", "= osim.CoordinateActuator() tau0.setCoordinate(j0.updCoordinate()) tau0.setName(\"tau0\") tau0.setOptimalForce(1) model.addComponent(tau0) tau1 = osim.CoordinateActuator() tau1.setCoordinate(j1.updCoordinate())", "0, 0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0", "# initial [lower bound, upper bound], # final [lower bound,", "provided state data. stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort =", "(the \"License\"); you may # # not use this file", "law or agreed to in writing, software # # distributed", "1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0])", "two links, each with a mass of 1 kg, center", "predict an optimal trajectory (and controls), 2. track the states", "| # +---o---o + # # iniital pose final pose", "problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: minimize final", "final time and error from desired # end effector position.", "# # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # #", "markers to body origin locations. 
m0 = osim.Marker(\"m0\", b0, osim.Vec3(0))", "guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\",", "= osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) #", "# # Author(s): <NAME> # # # # Licensed under", "bound, upper bound], # final [lower bound, upper bound]. finalTime", "the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return", "os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False # Create a model", "0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) #", "[-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50],", "= osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) #", "examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford", "for a minimum time swing-up. # In the diagram below,", "# Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\")", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "either express or implied. # # See the License for", "# # -------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford University", "# Cost: track provided marker data. 
markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef)", "from the optimal trajectory, and 3. track the marker trajectories", "# in the double pendulum. # # o # |", "from the optimal trajectory. \"\"\" visualize = True # The", "an optimal trajectory (and controls), 2. track the states from", "problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0, 5]) # Arguments are name,", "with pin joints. Assume each body is 1 m long.", "BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "bound, upper bound], # initial [lower bound, upper bound], #", "0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0,", "[-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: minimize final time", "osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect the bodies with", "2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0, 0])", "states from the optimal trajectory, and 3. track the marker", "products of inertia of zero. b0 = osim.Body(\"b0\", 1, osim.Vec3(0),", "= study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save", "limitations under the License. 
# # -------------------------------------------------------------------------- # import os", "2018 Stanford University and the Authors # # # #", "b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def solvePrediction(): # Predict", "guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\",", "solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return solution def", "# Configure the solver. solver = study.initTropterSolver() solver.set_num_mesh_intervals(100) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\")", "with the License. You may obtain a # # copy", "100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost: minimize final time and", "= True # The following environment variable is set during", "# +---o---o + # # iniital pose final pose #", "# # limitations under the License. # # -------------------------------------------------------------------------- #", "of mass at the body's # origin, and moments and", "# # See the License for the specific language governing", "# Assign a weight to each marker. markerWeights = osim.SetMarkerWeights()", "solution = study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return solution def", "and ---o represents a link # in the double pendulum.", "# # Unless required by applicable law or agreed to", "o # | # o # | # +---o---o +", "# # # Unless required by applicable law or agreed", "and moments and products of inertia of zero. b0 =", "problem to a setup file for reference. 
study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve", "-------------------------------------------------------------------------- # import os import math import opensim as osim", "# -------------------------------------------------------------------------- # # Copyright (c) 2018 Stanford University and", "or agreed to in writing, software # # distributed under", "visualize = False # Create a model of a double", "of zero. b0 = osim.Body(\"b0\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1", "if visualize: study.visualize(solution) return solution optimalTrajectory = solvePrediction() markersRef =", "problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50,", "solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the problem", "the following problems using a double pendulum model: 1. predict", "import opensim as osim \"\"\" This file performs the following", "except in compliance with the License. You may obtain a", "initial [lower bound, upper bound], # final [lower bound, upper", "= osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def", "License. # # -------------------------------------------------------------------------- # import os import math import", "the optimal trajectory, and 3. 
track the marker trajectories from", "bound], # initial [lower bound, upper bound], # final [lower", "the specific language governing permissions and # # limitations under", "solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi])", "guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10)", "link # in the double pendulum. # # o #", "m1.getLocationInGround(state)])) # Assign a weight to each marker. markerWeights =", "represents a link # in the double pendulum. # #", "model.addBody(b1) # Add markers to body origin locations. m0 =", "optimal trajectory for a minimum time swing-up. # In the", "j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1,", "statesTraj: model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state),", "problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. #", "of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless", "a minimum time swing-up. study = osim.MocoStudy() study.setName(\"double_pendulum_track\") problem =", "guess): # Predict the optimal trajectory for a minimum time", "or implied. # # See the License for the specific", "= study.solve() solution.write(\"examplePredictAndTrack_predict_solution.sto\") if visualize: study.visualize(solution) return solution def computeMarkersReference(predictedSolution):", "100]) # Cost: minimize final time and error from desired", "the problem. 
solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return", "the optimal trajectory for a minimum time swing-up. # In", "---o represents a link # in the double pendulum. #", "problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0) problem.setStateInfo(\"/jointset/j1/q1/value\",", "finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\",", "m0 = model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) #", "model.setName(\"double_pendulum\") # Create two links, each with a mass of", "trajectory for a minimum time swing-up. # In the diagram", "finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the solver.", "performs the following problems using a double pendulum model: 1.", "finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\",", "[-50, 50], 0, 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100])", "= study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return solution optimalTrajectory =", "q0 = j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0),", "by applicable law or agreed to in writing, software #", "at http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable", "for reference. 
study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem. solution = study.solve()", "bound], # final [lower bound, upper bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1]", "[-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0) problem.setControlInfo(\"/tau0\", [-100,", "OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- # # Copyright (c)", "trajectory, and 3. track the marker trajectories from the optimal", "# Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize:", "Apache License, Version 2.0 (the \"License\"); you may # #", "an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS", "Cost: track provided marker data. markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking)", "[-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100, 100])", "mass at the body's # origin, and moments and products", "to each marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5))", "osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers to body", "ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2,", "final [lower bound, upper bound]. 
finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime)", "0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost: track", "Author(s): <NAME> # # # # Licensed under the Apache", "you may # # not use this file except in", "bound], # final [lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10],", "effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure the solver. solver =", "file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem. solution =", "express or implied. # # See the License for the", "bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0, 0)", "model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories =", "You may obtain a # # copy of the License", "[-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10],", "[lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50,", "following environment variable is set during automated testing. if os.getenv('OPENSIM_USE_VISUALIZER')", "writing, software # # distributed under the License is distributed", "problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0)", "Version 2.0 (the \"License\"); you may # # not use", "osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to each marker. 
markerWeights", "= markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50,", "marker data. markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal()", "file for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem. solution =", "agreed to in writing, software # # distributed under the", "[-100, 100]) # Cost: minimize final time and error from", "double pendulum. # ------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\")", "def createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\") # Create two links,", "kg, center of mass at the body's # origin, and", "study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if", "1. predict an optimal trajectory (and controls), 2. 
track the", "finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\",", "markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001)", "Unless required by applicable law or agreed to in writing,", "computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj =", "guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\",", "a link # in the double pendulum. # # o", "osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort)", "= study.solve() solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef,", "solver. 
solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\")", "b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q1 =", "of 1 kg, center of mass at the body's #", "effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure", "= predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\",", "0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150,", "bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0))", "b1 = osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add markers", "origin, and ---o represents a link # in the double", "(c) 2018 Stanford University and the Authors # # #", "# # o # | # o # | #", "setup file for reference. study.printToXML(\"examplePredictAndTrack_track_states.omoco\") # Solve the problem. solution", "trajectory for a minimum time swing-up. study = osim.MocoStudy() study.setName(\"double_pendulum_track\")", "0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0, 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\",", "study.setName(\"double_pendulum_predict\") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds.", "provided marker data. markerTracking = osim.MocoMarkerTrackingGoal() markerTracking.setMarkersReference(markersRef) problem.addGoal(markerTracking) effort =", "origin locations. 
m0 = osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\",", "<NAME> # # # # Licensed under the Apache License,", "0, 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\", [-100, 100]) # Cost:", "end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost =", "osim.Inertia(1)) model.addBody(b1) # Add markers to body origin locations. m0", "1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b0) b1 = osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1))", "return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the optimal trajectory", "guess.setControl(\"/tau1\", [0, 0]) guess.resampleWithNumTimes(10) solver.setGuess(guess) # Save the problem to", "software # # distributed under the License is distributed on", "on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR", "model of a double pendulum. # ------------------------------------ def createDoublePendulumModel(): model", "solution.write(\"examplePredictAndTrack_track_states_solution.sto\") if visualize: study.visualize(solution) return solution def solveMarkerTracking(markersRef, guess): #", "in the double pendulum. # # o # | #", "transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def solvePrediction(): #", "[-150, 150]) # Cost: track provided state data. stateTracking =", "is set during automated testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize", "moments and products of inertia of zero. 
b0 = osim.Body(\"b0\",", "copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # # #", "final pose # study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem()", "finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure the", "= study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) #", "osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\") j1 =", "2. track the states from the optimal trajectory, and 3.", "def solveStateTracking(stateRef): # Predict the optimal trajectory for a minimum", "model.finalizeConnections() model.printToXML(\"double_pendulum.osim\") return model def solvePrediction(): # Predict the optimal", "model.addBody(b0) b1 = osim.Body(\"b1\", 1, osim.Vec3(0), osim.Inertia(1)) model.addBody(b1) # Add", "[0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0]) guess.setControl(\"/tau0\", [0, 0]) guess.setControl(\"/tau1\", [0,", "optimal trajectory for a minimum time swing-up. study = osim.MocoStudy()", "License, Version 2.0 (the \"License\"); you may # # not", "model.addComponent(tau1) # Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1)", "osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1)", "\"\"\" This file performs the following problems using a double", "http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law", "environment variable is set during automated testing. 
if os.getenv('OPENSIM_USE_VISUALIZER') ==", "# Cost: minimize final time and error from desired #", "[0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0,", "# Bounds. # Arguments are name, [lower bound, upper bound],", "solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable()) trackedSolution2 = solveMarkerTracking(markersRef,", "1 m long. j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0,", "model.printToXML(\"double_pendulum.osim\") return model def solvePrediction(): # Predict the optimal trajectory", "# Add display geometry. bodyGeometry = osim.Ellipsoid(0.5, 0.1, 0.1) transform", "a # # copy of the License at http://www.apache.org/licenses/LICENSE-2.0 #", "# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the", "osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) #", "| # o # | # +---o---o + # #", "solver.set_optim_solver(\"ipopt\") guess = solver.createGuess() guess.setNumTimes(2) guess.setTime([0, 1]) guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi])", "# copy of the License at http://www.apache.org/licenses/LICENSE-2.0 # # #", "reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem. solution = study.solve() solution.write(\"examplePredictAndTrack_track_markers_solution.sto\")", "problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve", "problem = study.updProblem() # Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. 
problem.setTimeBounds(0,", "5]) # Arguments are name, [lower bound, upper bound], #", "statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for", "[-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost: track provided state", "may obtain a # # copy of the License at", "upper bound], # final [lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10,", "variable is set during automated testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0':", "solution.write(\"examplePredictAndTrack_track_markers_solution.sto\") if visualize: study.visualize(solution) return solution optimalTrajectory = solvePrediction() markersRef", "following problems using a double pendulum model: 1. predict an", "trajectory (and controls), 2. track the states from the optimal", "the License for the specific language governing permissions and #", "testing. if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False # Create", "Connect the bodies with pin joints. Assume each body is", "to body origin locations. m0 = osim.Marker(\"m0\", b0, osim.Vec3(0)) m1", "the body's # origin, and moments and products of inertia", "visualize = True # The following environment variable is set", "applicable law or agreed to in writing, software # #", "during automated testing. 
if os.getenv('OPENSIM_USE_VISUALIZER') == '0': visualize = False", "= osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0))", "[-10, 10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-150, 150])", "return solution def computeMarkersReference(predictedSolution): model = createDoublePendulumModel() model.initSystem() states =", "model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 =", "problems using a double pendulum model: 1. predict an optimal", "the optimal trajectory. \"\"\" visualize = True # The following", "at the body's # origin, and moments and products of", "optimal trajectory. \"\"\" visualize = True # The following environment", "License is distributed on an \"AS IS\" BASIS, # #", "osim.Vec3(0), b0, osim.Vec3(-1, 0, 0), osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\")", "= osim.Ellipsoid(0.5, 0.1, 0.1) transform = osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center", "from desired # end effector position. ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001)", "effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure the solver. solver", "osim.Vec3(0)) q0 = j0.updCoordinate() q0.setName(\"q0\") j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0),", "= osim.Transform(osim.Vec3(-0.5, 0, 0)) b0Center = osim.PhysicalOffsetFrame(\"b0_center\", b0, transform) b0.addComponent(b0Center)", "# In the diagram below, + represents the origin, and", "j1 = osim.PinJoint(\"j1\", b0, osim.Vec3(0), osim.Vec3(0), b1, osim.Vec3(-1, 0, 0),", "as osim \"\"\" This file performs the following problems using", "osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in statesTraj: model.realizePosition(state) m0 =", "Assign a weight to each marker. 
markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\",", "10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\", [-50, 50], 0) problem.setStateInfo(\"/jointset/j1/q1/value\", [-10, 10], 0)", "10], 0) problem.setStateInfo(\"/jointset/j1/q1/speed\", [-50, 50], 0) problem.setControlInfo(\"/tau0\", [-100, 100]) problem.setControlInfo(\"/tau1\",", "stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort)", "pose # study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem() #", "ftCost = osim.MocoFinalTimeGoal() ftCost.setWeight(0.001) problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0)", "[lower bound, upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\",", "# # not use this file except in compliance with", "== '0': visualize = False # Create a model of", "OF ANY KIND, either express or implied. # # See", "model.realizePosition(state) m0 = model.getComponent(\"markerset/m0\") m1 = model.getComponent(\"markerset/m1\") markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)]))", "minimum time swing-up. # In the diagram below, + represents", "(and controls), 2. track the states from the optimal trajectory,", "marker. markerWeights = osim.SetMarkerWeights() markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m0\", 1)) markerWeights.cloneAndAppend(osim.MarkerWeight(\"/markerset/m1\", 5)) return osim.MarkersReference(markerTrajectories,", "setup file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the problem. 
solution", "# # # # Licensed under the Apache License, Version", "o # | # +---o---o + # # iniital pose", "Bounds. problem.setTimeBounds(0, [0, 5]) # Arguments are name, [lower bound,", "# ------------------------------------ def createDoublePendulumModel(): model = osim.Model() model.setName(\"double_pendulum\") # Create", "effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the solver. solver =", "solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save the problem to a setup file", "may # # not use this file except in compliance", "b0Center.attachGeometry(bodyGeometry.clone()) b1Center = osim.PhysicalOffsetFrame(\"b1_center\", b1, transform) b1.addComponent(b1Center) b1Center.attachGeometry(bodyGeometry.clone()) model.finalizeConnections() model.printToXML(\"double_pendulum.osim\")", "bound, upper bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10,", "states) markerTrajectories = osim.TimeSeriesTableVec3() markerTrajectories.setColumnLabels([\"/markerset/m0\", \"/markerset/m1\"]) for state in statesTraj:", "def solvePrediction(): # Predict the optimal trajectory for a minimum", "file performs the following problems using a double pendulum model:", "a double pendulum model: 1. predict an optimal trajectory (and", "0), osim.Vec3(0)) q1 = j1.updCoordinate() q1.setName(\"q1\") model.addJoint(j0) model.addJoint(j1) tau0 =", "language governing permissions and # # limitations under the License.", "body origin locations. m0 = osim.Marker(\"m0\", b0, osim.Vec3(0)) m1 =", "j0 = osim.PinJoint(\"j0\", model.getGround(), osim.Vec3(0), osim.Vec3(0), b0, osim.Vec3(-1, 0, 0),", "under the License. # # -------------------------------------------------------------------------- # import os import", "state data. 
stateTracking = osim.MocoStateTrackingGoal() stateTracking.setReference(osim.TableProcessor(stateRef)) problem.addGoal(stateTracking) effort = osim.MocoControlGoal()", "# # # Author(s): <NAME> # # # # Licensed", "# distributed under the License is distributed on an \"AS", "# final [lower bound, upper bound]. finalTime = markersRef.getMarkerTable().getIndependentColumn()[-1] problem.setTimeBounds(0,", "problem.addGoal(stateTracking) effort = osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort) #", "# Unless required by applicable law or agreed to in", "3. track the marker trajectories from the optimal trajectory. \"\"\"", "a setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") # Solve the problem.", "# Bounds. problem.setTimeBounds(0, [0, 5]) # Arguments are name, [lower", "problem.setControlInfo(\"/tau0\", [-150, 150]) problem.setControlInfo(\"/tau1\", [-150, 150]) # Cost: track provided", "the problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_predict.omoco\") #", "Create a model of a double pendulum. 
# ------------------------------------ def", "osim.Vec3(0)) m1 = osim.Marker(\"m1\", b1, osim.Vec3(0)) model.addMarker(m0) model.addMarker(m1) # Connect", "solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") # Save the problem to", "5)) return osim.MarkersReference(markerTrajectories, markerWeights) def solveStateTracking(stateRef): # Predict the optimal", "a mass of 1 kg, center of mass at the", "[lower bound, upper bound], # final [lower bound, upper bound].", "-------------------------------------------------------------------------- # # OpenSim Moco: examplePredictAndTrack.py # # -------------------------------------------------------------------------- #", "# # # # Author(s): <NAME> # # # #", "time and error from desired # end effector position. ftCost", "# Model (dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. problem.setTimeBounds(0, [0, 5]) #", "Save the problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\")", "to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve the", "bound, upper bound]. finalTime = stateRef.getIndependentColumn()[-1] problem.setTimeBounds(0, finalTime) problem.setStateInfo(\"/jointset/j0/q0/value\", [-10,", "the optimal trajectory for a minimum time swing-up. 
study =", "study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess) # Save", "problem.addGoal(ftCost) finalCost = osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0))", "study = osim.MocoStudy() study.setName(\"double_pendulum_predict\") problem = study.updProblem() # Model (dynamics).", "body's # origin, and moments and products of inertia of", "solver = study.initTropterSolver() solver.set_num_mesh_intervals(50) solver.set_verbosity(2) solver.set_optim_solver(\"ipopt\") solver.set_optim_jacobian_approximation(\"exact\") solver.set_optim_hessian_approximation(\"exact\") solver.set_exact_hessian_block_sparsity_mode(\"dense\") solver.setGuess(guess)", "problem to a setup file for reference. study.printToXML(\"examplePredictAndTrack_track_markers.omoco\") # Solve", "osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.0001) # problem.addGoal(effort) # Configure the solver. solver", "the problem to a setup file for reference. 
study.printToXML(\"examplePredictAndTrack_track_states.omoco\") #", "-math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\", [0, 0])", "= createDoublePendulumModel() model.initSystem() states = predictedSolution.exportToStatesTable() statesTraj = osim.StatesTrajectory.createFromStatesTable(model, states)", "markerTrajectories.appendRow(state.getTime(), osim.RowVectorVec3([m0.getLocationInGround(state), m1.getLocationInGround(state)])) # Assign a weight to each marker.", "osim.MocoControlGoal() effort.setName(\"effort\") effort.setWeight(0.001) # TODO problem.addGoal(effort) # Configure the solver.", "final [lower bound, upper bound]. problem.setStateInfo(\"/jointset/j0/q0/value\", [-10, 10], 0) problem.setStateInfo(\"/jointset/j0/q0/speed\",", "guess.setState(\"/jointset/j0/q0/value\", [0, -math.pi]) guess.setState(\"/jointset/j1/q1/value\", [0, 2*math.pi]) guess.setState(\"/jointset/j0/q0/speed\", [0, 0]) guess.setState(\"/jointset/j1/q1/speed\",", "(dynamics). problem.setModel(createDoublePendulumModel()) # Bounds. # Arguments are name, [lower bound,", "= solvePrediction() markersRef = computeMarkersReference(optimalTrajectory) trackedSolution = solveStateTracking(optimalTrajectory.exportToStatesTable()) trackedSolution2 =", "a double pendulum. # ------------------------------------ def createDoublePendulumModel(): model = osim.Model()", "osim.MocoMarkerFinalGoal() finalCost.setName(\"final\") finalCost.setWeight(1000.0) finalCost.setPointName(\"/markerset/m1\") finalCost.setReferenceLocation(osim.Vec3(0, 2, 0)) problem.addGoal(finalCost) # Configure", "permissions and # # limitations under the License. # #", "and products of inertia of zero. b0 = osim.Body(\"b0\", 1," ]
[ "DataBase ''' meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False)", "text,Video Text)\"\"\") def enterData(self,meetingData): ''' Enters Data From The UI", "conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text,", "EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)\"\"\")", "index = False) def readData(self): ''' Reads Data From The", "Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS MeetingData (Name", "MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video Text)\"\"\") def", "The SQL DataBase ''' self.cursor.execute('''SELECT * FROM MeetingData''') retVal =", "If it Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS", "= conn, if_exists='replace', index = False) def readData(self): ''' Reads", "def readData(self): ''' Reads Data From The SQL DataBase '''", "conn.cursor() def __init__(self): self.createTable() def createTable(self): ''' Creates A Table", "conn, if_exists='replace', index = False) def readData(self): ''' Reads Data", "= False) def readData(self): ''' Reads Data From The SQL", "pandas import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor =", "it Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS MeetingData", "NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio text,Video", "''' Creates A Table If it Doesnt Exist ''' conn.execute(\"\"\"CREATE", "sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor = conn.cursor() def __init__(self): self.createTable() def", "''' Enters Data From The UI Table To The DataBase", "The DataBase ''' meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index =", "Table If it Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE IF NOT", "self.createTable() def createTable(self): ''' Creates A Table If it Doesnt", "<gh_stars>0 import sqlite3 from pandas 
import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False)", "(Name text,ID text,Password text, DateTime text,Audio text,Video Text)\"\"\") def enterData(self,meetingData):", "if_exists='replace', index = False) def readData(self): ''' Reads Data From", "TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime", "Data From The UI Table To The DataBase ''' meetingData.to_sql('MeetingData',", "Enters Data From The UI Table To The DataBase '''", "From The SQL DataBase ''' self.cursor.execute('''SELECT * FROM MeetingData''') retVal", "SQL DataBase ''' self.cursor.execute('''SELECT * FROM MeetingData''') retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video'])", "def createTable(self): ''' Creates A Table If it Doesnt Exist", "From The UI Table To The DataBase ''' meetingData.to_sql('MeetingData', con", "enterData(self,meetingData): ''' Enters Data From The UI Table To The", "def __init__(self): self.createTable() def createTable(self): ''' Creates A Table If", "import sqlite3 from pandas import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class", "text,Audio text,Video Text)\"\"\") def enterData(self,meetingData): ''' Enters Data From The", "To The DataBase ''' meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index", "= conn.cursor() def __init__(self): self.createTable() def createTable(self): ''' Creates A", "IF NOT EXISTS MeetingData (Name text,ID text,Password text, DateTime text,Audio", "Text)\"\"\") def enterData(self,meetingData): ''' Enters Data From The UI Table", "''' conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID text,Password", "''' Reads Data From The SQL DataBase ''' self.cursor.execute('''SELECT *", "class DataBase(): cursor = conn.cursor() def __init__(self): self.createTable() def createTable(self):", "Reads Data From The SQL DataBase ''' self.cursor.execute('''SELECT * FROM", "text,Password text, 
DateTime text,Audio text,Video Text)\"\"\") def enterData(self,meetingData): ''' Enters", "DataBase ''' self.cursor.execute('''SELECT * FROM MeetingData''') retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video']) return", "cursor = conn.cursor() def __init__(self): self.createTable() def createTable(self): ''' Creates", "con = conn, if_exists='replace', index = False) def readData(self): '''", "A Table If it Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE IF", "from pandas import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor", "sqlite3 from pandas import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase():", "DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor = conn.cursor() def", "UI Table To The DataBase ''' meetingData.to_sql('MeetingData', con = conn,", "text,ID text,Password text, DateTime text,Audio text,Video Text)\"\"\") def enterData(self,meetingData): '''", "__init__(self): self.createTable() def createTable(self): ''' Creates A Table If it", "import DataFrame conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor = conn.cursor()", "Exist ''' conn.execute(\"\"\"CREATE TABLE IF NOT EXISTS MeetingData (Name text,ID", "DateTime text,Audio text,Video Text)\"\"\") def enterData(self,meetingData): ''' Enters Data From", "''' meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False) def", "''' self.cursor.execute('''SELECT * FROM MeetingData''') retVal = DataFrame(self.cursor.fetchall(),columns=['Name','ID','Password','DateTime','Audio','Video']) return retVal", "Data From The SQL DataBase ''' self.cursor.execute('''SELECT * FROM MeetingData''')", "= sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor = conn.cursor() def __init__(self): self.createTable()", "text, DateTime text,Audio text,Video Text)\"\"\") def 
enterData(self,meetingData): ''' Enters Data", "def enterData(self,meetingData): ''' Enters Data From The UI Table To", "meetingData.to_sql('MeetingData', con = conn, if_exists='replace', index = False) def readData(self):", "False) def readData(self): ''' Reads Data From The SQL DataBase", "readData(self): ''' Reads Data From The SQL DataBase ''' self.cursor.execute('''SELECT", "DataBase(): cursor = conn.cursor() def __init__(self): self.createTable() def createTable(self): '''", "conn = sqlite3.connect('./data.db',check_same_thread=False) class DataBase(): cursor = conn.cursor() def __init__(self):", "Creates A Table If it Doesnt Exist ''' conn.execute(\"\"\"CREATE TABLE", "The UI Table To The DataBase ''' meetingData.to_sql('MeetingData', con =", "Table To The DataBase ''' meetingData.to_sql('MeetingData', con = conn, if_exists='replace',", "createTable(self): ''' Creates A Table If it Doesnt Exist '''" ]
[ "Returns ------- gdf : GpuDataFrame \"\"\" import numpy as np", "'TIMESTAMP': vals = [None if v is None else base", "'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME':", "numpy as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import", "Returns ------- schema : pyarrow.Schema \"\"\" import pyarrow as pa", "\"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\",", "collections import namedtuple from sqlalchemy import text import mapd.ttypes as", "col in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return [", "(base + datetime.timedelta(seconds=val)) elif typename == 'DATE': val = (base", "a select ipc_gpu into a GpuDataFrame Parameters ---------- tdf :", "= ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy.", "Parse the results of a select ipc_gpu into a GpuDataFrame", "# type: (T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf): \"\"\"", "rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse", ": TDataFrame Returns ------- gdf : GpuDataFrame \"\"\" import numpy", "v in vals] elif typename == 'TIME': vals = [None", "schema : pyarrow.Schema \"\"\" import pyarrow as pa reader =", "Description = namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"])", ": pyarrow.Schema \"\"\" import pyarrow as pa reader = pa.RecordBatchStreamReader(buf)", "text import mapd.ttypes as T from ._utils import seconds_to_time Description", "val = seconds_to_time(val) return val def _extract_col_vals(desc, val): # type:", "_typeattr[typename] + '_col') vals = [None if null else v", "darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, 
darr)", "import drvapi from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch", "(List[T.TColumnType]) -> List[Description] \"\"\" Return a tuple of (name, type_code,", "---------- tdf : TDataFrame Returns ------- gdf : GpuDataFrame \"\"\"", "\"precision\", \"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\",", "+ datetime.timedelta(seconds=v)).date() for v in vals] elif typename == 'TIME':", "written to shared memory Parameters ---------- buf : pyarrow.Buffer Returns", "return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse the results of a", "tdf : TDataFrame Returns ------- gdf : GpuDataFrame \"\"\" import", "'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real',", "None else seconds_to_time(v) for v in vals] return vals def", "col.col_type.nullable) for col in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details", "darr) df = DataFrame() for k, v in reader.to_dict().items(): df[k]", "'_col') vals = [None if null else v for null,", "in row_desc ] def _is_columnar(data): # type: (T.TQueryResult) -> bool", "# For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale,", "cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer", "a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns ------- gdf", "\"precision\", \"scale\", \"comp_param\"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int',", "else base + datetime.timedelta(seconds=v) for v in vals] elif typename", "== 'DATE': vals = [None if v is None else", "k, v in reader.to_dict().items(): df[k] = v return df def", "from collections import namedtuple from sqlalchemy import text import mapd.ttypes", "'_val') base = datetime.datetime(1970, 1, 1) if typename == 
'TIMESTAMP':", "T from ._utils import seconds_to_time Description = namedtuple(\"Description\", [\"name\", \"type_code\",", "T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data, _typeattr[typename] + '_col')", "pyarrow.Schema Returns ------- df : pandas.DataFrame \"\"\" import pyarrow as", "ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size)", "# type: (T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if", "= cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df", "\"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\",", "# type: (List[T.TColumnType]) -> List[Description] \"\"\" Return a tuple of", "from sqlalchemy import text import mapd.ttypes as T from ._utils", "col.col_type.type, None, None, None, None, col.col_type.nullable) for col in row_desc]", "\"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\",", "(T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls", "a `pandas.DataFrame` from a buffer written to shared memory Parameters", "'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR':", "null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type, None, None, None, None,", "= { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int',", "pandas.DataFrame \"\"\" import pyarrow as pa message = pa.read_message(buf) rb", "elif typename == 'TIME': vals = [None if v is", "\"\"\" import numpy as np from pygdf.gpuarrow import GpuArrowReader from", "'real', 'DOUBLE': 'real', 'STR': 'str', } 
_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types", "\"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\", \"scale\",", "GpuDataFrame \"\"\" import numpy as np from pygdf.gpuarrow import GpuArrowReader", "_extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision,", "pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame \"\"\"", "gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for k,", "def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] \"\"\" Return a", "return data.row_set.is_columnar def _load_schema(buf): \"\"\" Load a `pyarrow.Schema` from a", "val def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) -> Any", "from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None,", "T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val = getattr(val.val, _typeattr[typename] +", "= np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize,", "for v in vals] elif typename == 'DATE': vals =", "(T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return", "import numpy as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe", "MapD \"\"\" import datetime from collections import namedtuple from sqlalchemy", "sqlalchemy import text import mapd.ttypes as T from ._utils import", "Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val =", "for v in vals] elif typename == 'TIME': vals =", "for parsing data returned from MapD \"\"\" import datetime from", "= 
[None if null else v for null, v in", "= pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf):", "None val = getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970,", "= cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) #", "if typename == 'TIMESTAMP': vals = [None if v is", "typename == 'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif typename", "'TIME': val = seconds_to_time(val) return val def _extract_col_vals(desc, val): #", "to shared memory Parameters ---------- buf : pyarrow.Buffer Returns -------", "T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals", "buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema \"\"\" import", "val): # type: (T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]", "'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES", "1) if typename == 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val))", "null else v for null, v in zip(nulls, vals)] base", "seconds_to_time(val) return val def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn)", "to shared memory Parameters ---------- buf : pyarrow.Buffer shcema :", "elif typename == 'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif", "dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra", "import seconds_to_time Description = namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\",", "from a buffer written to shared memory Parameters ---------- buf", "drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr", "import datetime from collections import namedtuple from 
sqlalchemy import text", "List[Description] \"\"\" Return a tuple of (name, type_code, display_size, internal_size,", "the results of a select ipc_gpu into a GpuDataFrame Parameters", "seconds_to_time Description = namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\",", "a tuple of (name, type_code, display_size, internal_size, precision, scale, null_ok)", "reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): \"\"\" Load", "Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for", "cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO:", "GpuDataFrame Parameters ---------- tdf : TDataFrame Returns ------- gdf :", "'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT':", "== 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif typename ==", "'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', }", "'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL':", "zip(nulls, vals)] base = datetime.datetime(1970, 1, 1) if typename ==", "= datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': vals =", "np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from", "= cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx)", "_thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc ]", "._utils import seconds_to_time Description = namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\",", "numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle = 
drvapi.cu_ipc_mem_handle(*tdf.df_handle)", "namedtuple from sqlalchemy import text import mapd.ttypes as T from", "'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real',", "memory Parameters ---------- buf : pyarrow.Buffer Returns ------- schema :", "------- schema : pyarrow.Schema \"\"\" import pyarrow as pa reader", "Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns -------", "from numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle =", "x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc ] def _is_columnar(data):", "None else base + datetime.timedelta(seconds=v) for v in vals] elif", "'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values", "_extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) -> Any typename =", "base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': vals", "import GpuArrowReader from pygdf.dataframe import DataFrame from numba import cuda", "ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr =", "bool return data.row_set.is_columnar def _load_schema(buf): \"\"\" Load a `pyarrow.Schema` from", "T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType,", "x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc ] def", "import cuda from numba.cuda.cudadrv import drvapi from .shm import load_buffer", "precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type, None, None,", "[ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in", "np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = 
cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype,", "schema): \"\"\" Load a `pandas.DataFrame` from a buffer written to", "namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr = {", "if v is None else (base + datetime.timedelta(seconds=v)).date() for v", "'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int',", "as np from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame", "base + datetime.timedelta(seconds=v) for v in vals] elif typename ==", "parsing data returned from MapD \"\"\" import datetime from collections", "internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type, None,", "schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse the results of", "Parameters ---------- buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema", "vals] elif typename == 'DATE': vals = [None if v", "shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame \"\"\" import", "{ 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP':", "from pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from numba", "= T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val = getattr(val.val, _typeattr[typename]", "for k, v in reader.to_dict().items(): df[k] = v return df", "data returned from MapD \"\"\" import datetime from collections import", "display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type,", "'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int',", "For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, 
x.col_type.scale, x.col_type.comp_param)", "as pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return", "methods for parsing data returned from MapD \"\"\" import datetime", "pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\"", ": pyarrow.Schema Returns ------- df : pandas.DataFrame \"\"\" import pyarrow", "mapd.ttypes as T from ._utils import seconds_to_time Description = namedtuple(\"Description\",", "-> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val", "import DataFrame from numba import cuda from numba.cuda.cudadrv import drvapi", "= T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type:", "copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr =", "import pyarrow as pa message = pa.read_message(buf) rb = pa.read_record_batch(message,", "data.row_set.is_columnar def _load_schema(buf): \"\"\" Load a `pyarrow.Schema` from a buffer", "Load a `pyarrow.Schema` from a buffer written to shared memory", "if v is None else base + datetime.timedelta(seconds=v) for v", "gdf : GpuDataFrame \"\"\" import numpy as np from pygdf.gpuarrow", "pygdf.dataframe import DataFrame from numba import cuda from numba.cuda.cudadrv import", "val = (base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val", "in vals] return vals def _extract_description(row_desc): # type: (List[T.TColumnType]) ->", "v is None else seconds_to_time(v) for v in vals] return", "datetime.timedelta(seconds=v) for v in vals] elif typename == 'DATE': vals", "Utility methods for parsing data returned from MapD \"\"\" import", "in vals] elif typename == 'TIME': vals = [None if", "\"scale\", \"comp_param\"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT':", "typename == 'TIMESTAMP': vals = [None if v is None", "---------- 
buf : pyarrow.Buffer Returns ------- schema : pyarrow.Schema \"\"\"", "val = (base + datetime.timedelta(seconds=val)) elif typename == 'DATE': val", "[None if v is None else seconds_to_time(v) for v in", "typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data, _typeattr[typename]", "return None val = getattr(val.val, _typeattr[typename] + '_val') base =", "+ '_col') vals = [None if null else v for", "= [None if v is None else (base + datetime.timedelta(seconds=v)).date()", "tuple of (name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description", "vals = [None if null else v for null, v", "DataFrame from numba import cuda from numba.cuda.cudadrv import drvapi from", "cuda from numba.cuda.cudadrv import drvapi from .shm import load_buffer ipc_handle", "type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name,", "vals = [None if v is None else seconds_to_time(v) for", "a `pyarrow.Schema` from a buffer written to shared memory Parameters", "= (base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val =", "from pygdf.dataframe import DataFrame from numba import cuda from numba.cuda.cudadrv", "typename == 'TIME': vals = [None if v is None", "[None if v is None else (base + datetime.timedelta(seconds=v)).date() for", "def _is_columnar(data): # type: (T.TQueryResult) -> bool return data.row_set.is_columnar def", "rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse the results of a select", "as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema):", "_extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) -> Any typename =", "typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None val = getattr(val.val,", "'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 
'DECIMAL': 'real', 'DOUBLE':", "'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values =", "https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable)", "'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc,", "Load a `pandas.DataFrame` from a buffer written to shared memory", "= seconds_to_time(val) return val def _extract_col_vals(desc, val): # type: (T.TColumnType,", "[None if v is None else base + datetime.timedelta(seconds=v) for", "`pandas.DataFrame` from a buffer written to shared memory Parameters ----------", "\"\"\" Load a `pyarrow.Schema` from a buffer written to shared", "else v for null, v in zip(nulls, vals)] base =", "def _extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable,", "buf : pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df :", "results of a select ipc_gpu into a GpuDataFrame Parameters ----------", "row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type],", "buffer written to shared memory Parameters ---------- buf : pyarrow.Buffer", "if typename == 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif", "1, 1) if typename == 'TIMESTAMP': vals = [None if", "_extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] \"\"\" Return a tuple", "into a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns -------", "cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df =", "1) if typename == 'TIMESTAMP': vals = [None if v", "[None if null else v for null, v in 
zip(nulls,", "TDataFrame Returns ------- gdf : GpuDataFrame \"\"\" import numpy as", "val.is_null: return None val = getattr(val.val, _typeattr[typename] + '_val') base", "vals = [None if v is None else (base +", "shared memory Parameters ---------- buf : pyarrow.Buffer Returns ------- schema", ".shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle,", "= pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): \"\"\" Load a", "None, None, None, None, col.col_type.nullable) for col in row_desc] def", "shared memory Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema", "pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): \"\"\"", "base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': val", "= drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx = cuda.current_context()", "select ipc_gpu into a GpuDataFrame Parameters ---------- tdf : TDataFrame", "val = getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970, 1,", "datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': vals = [None", "in vals] elif typename == 'DATE': vals = [None if", "\"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr = { 'SMALLINT': 'int',", "def _load_data(buf, schema): \"\"\" Load a `pandas.DataFrame` from a buffer", "= namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails", "schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size,", "import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size)", "} _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES 
_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val):", "def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) -> Any typename", "# TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype =", "v in vals] elif typename == 'DATE': vals = [None", "Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data,", "reader.schema def _load_data(buf, schema): \"\"\" Load a `pandas.DataFrame` from a", "vals = getattr(val.data, _typeattr[typename] + '_col') vals = [None if", "= getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970, 1, 1)", "] def _is_columnar(data): # type: (T.TQueryResult) -> bool return data.row_set.is_columnar", "return reader.schema def _load_data(buf, schema): \"\"\" Load a `pandas.DataFrame` from", "message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas() def", "load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx", "1, 1) if typename == 'TIMESTAMP': val = (base +", "return df def _bind_parameters(operation, parameters): return (text(operation) .bindparams(**parameters) .compile(compile_kwargs={\"literal_binds\": True}))", "vals] return vals def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description]", "\"\"\" import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema", "_parse_tdf_gpu(tdf): \"\"\" Parse the results of a select ipc_gpu into", "for x in row_desc ] def _is_columnar(data): # type: (T.TQueryResult)", "\"\"\" Load a `pandas.DataFrame` from a buffer written to shared", "getattr(val.data, _typeattr[typename] + '_col') vals = [None if null else", "type: (T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf): \"\"\" Load", "for null, v in zip(nulls, vals)] base = datetime.datetime(1970, 1,", 
"np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer,", "[\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\",", "scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return [Description(col.col_name, col.col_type.type, None, None, None,", "import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def", "= np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader =", "size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle,", "'DATE': vals = [None if v is None else (base", "_thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): #", "if val.is_null: return None val = getattr(val.val, _typeattr[typename] + '_val')", "'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str',", "in reader.to_dict().items(): df[k] = v return df def _bind_parameters(operation, parameters):", "namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\", \"internal_size\", \"precision\", \"scale\", \"null_ok\"]) ColumnDetails =", "returned from MapD \"\"\" import datetime from collections import namedtuple", "= v return df def _bind_parameters(operation, parameters): return (text(operation) .bindparams(**parameters)", "pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema) return rb.to_pandas()", "vals)] base = datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP':", "-> List[Description] \"\"\" Return a tuple of (name, type_code, display_size,", "= datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': val =", "ColumnDetails(x.col_name, 
_thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x in row_desc", "'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL':", "for col in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return", "_load_data(buf, schema): \"\"\" Load a `pandas.DataFrame` from a buffer written", "v in vals] return vals def _extract_description(row_desc): # type: (List[T.TColumnType])", "= T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals = getattr(val.data, _typeattr[typename] +", "= getattr(val.data, _typeattr[typename] + '_col') vals = [None if null", "in zip(nulls, vals)] base = datetime.datetime(1970, 1, 1) if typename", "= DataFrame() for k, v in reader.to_dict().items(): df[k] = v", "None else (base + datetime.timedelta(seconds=v)).date() for v in vals] elif", "seconds_to_time(v) for v in vals] return vals def _extract_description(row_desc): #", "pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf, schema): \"\"\" Load a `pandas.DataFrame`", "return [ ColumnDetails(x.col_name, _thrift_values_to_types[x.col_type.type], x.col_type.nullable, x.col_type.precision, x.col_type.scale, x.col_type.comp_param) for x", "import mapd.ttypes as T from ._utils import seconds_to_time Description =", "is None else (base + datetime.timedelta(seconds=v)).date() for v in vals]", "of a select ipc_gpu into a GpuDataFrame Parameters ---------- tdf", "vals = [None if v is None else base +", "== 'TIME': vals = [None if v is None else", "= load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. 
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(),", "is None else seconds_to_time(v) for v in vals] return vals", "\"comp_param\"]) _typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int',", "'TIME': vals = [None if v is None else seconds_to_time(v)", "dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader", "_typeattr = { 'SMALLINT': 'int', 'INT': 'int', 'BIGINT': 'int', 'TIME':", "'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types =", "= [None if v is None else base + datetime.timedelta(seconds=v)", "written to shared memory Parameters ---------- buf : pyarrow.Buffer shcema", "from MapD \"\"\" import datetime from collections import namedtuple from", "typename == 'DATE': vals = [None if v is None", "== 'DATE': val = (base + datetime.timedelta(seconds=val)).date() elif typename ==", "in row_desc] def _extract_column_details(row_desc): # For Connection.get_table_details return [ ColumnDetails(x.col_name,", "== 'TIME': val = seconds_to_time(val) return val def _extract_col_vals(desc, val):", "v is None else base + datetime.timedelta(seconds=v) for v in", "T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) -> Any", "from ._utils import seconds_to_time Description = namedtuple(\"Description\", [\"name\", \"type_code\", \"display_size\",", "\"\"\" return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for", "schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. 
schema_buffer =", "-> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls = val.nulls vals =", "df : pandas.DataFrame \"\"\" import pyarrow as pa message =", "val): # type: (T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type]", "import text import mapd.ttypes as T from ._utils import seconds_to_time", "datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val = seconds_to_time(val) return val", "(name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\" return", "nulls = val.nulls vals = getattr(val.data, _typeattr[typename] + '_col') vals", "datetime.datetime(1970, 1, 1) if typename == 'TIMESTAMP': val = (base", "elif typename == 'TIME': val = seconds_to_time(val) return val def", "None, col.col_type.nullable) for col in row_desc] def _extract_column_details(row_desc): # For", "datetime.timedelta(seconds=v)).date() for v in vals] elif typename == 'TIME': vals", "getattr(val.val, _typeattr[typename] + '_val') base = datetime.datetime(1970, 1, 1) if", "strides=dtype.itemsize, dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame()", "type: (T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls =", "vals def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] \"\"\" Return", "df = DataFrame() for k, v in reader.to_dict().items(): df[k] =", "(T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf): \"\"\" Load a", "tdf.sm_size) # TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype", "v return df def _bind_parameters(operation, parameters): return (text(operation) .bindparams(**parameters) .compile(compile_kwargs={\"literal_binds\":", "if v is None else seconds_to_time(v) for v in vals]", "extra copy. 
schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte) darr", "------- gdf : GpuDataFrame \"\"\" import numpy as np from", "dtype=dtype, gpu_data=dptr) reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for", "T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null: return None", "DataFrame() for k, v in reader.to_dict().items(): df[k] = v return", "of (name, type_code, display_size, internal_size, precision, scale, null_ok) https://www.python.org/dev/peps/pep-0249/#description \"\"\"", "pyarrow.Buffer Returns ------- schema : pyarrow.Schema \"\"\" import pyarrow as", "type: (List[T.TColumnType]) -> List[Description] \"\"\" Return a tuple of (name,", "[\"name\", \"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr = { 'SMALLINT':", ": pandas.DataFrame \"\"\" import pyarrow as pa message = pa.read_message(buf)", "else (base + datetime.timedelta(seconds=v)).date() for v in vals] elif typename", "pyarrow.Schema \"\"\" import pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return", "numba import cuda from numba.cuda.cudadrv import drvapi from .shm import", "= GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v in", "def _parse_tdf_gpu(tdf): \"\"\" Parse the results of a select ipc_gpu", "GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v in reader.to_dict().items():", "else seconds_to_time(v) for v in vals] return vals def _extract_description(row_desc):", "drvapi from .shm import load_buffer ipc_handle = drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch =", "= T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum) ->", "\"\"\" Utility methods for parsing data returned from MapD \"\"\"", "def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) -> Any typename", "Return a tuple of (name, type_code, display_size, internal_size, precision, scale,", "ipc_handle = 
drvapi.cu_ipc_mem_handle(*tdf.df_handle) ipch = cuda.driver.IpcHandle(None, ipc_handle, size=tdf.df_size) ctx =", "'int', 'INT': 'int', 'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE':", "'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif typename == 'DATE':", "pyarrow as pa reader = pa.RecordBatchStreamReader(buf) return reader.schema def _load_data(buf,", "'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int', 'FLOAT': 'real', 'DECIMAL': 'real',", "dtype=np.uint8) dtype = np.dtype(np.byte) darr = cuda.devicearray.DeviceNDArray(shape=dptr.size, strides=dtype.itemsize, dtype=dtype, gpu_data=dptr)", "return val def _extract_col_vals(desc, val): # type: (T.TColumnType, T.TColumn) ->", ": pyarrow.Buffer Returns ------- schema : pyarrow.Schema \"\"\" import pyarrow", "-> bool return data.row_set.is_columnar def _load_schema(buf): \"\"\" Load a `pyarrow.Schema`", "TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8) dtype = np.dtype(np.byte)", "datetime from collections import namedtuple from sqlalchemy import text import", "[Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for col in", "if null else v for null, v in zip(nulls, vals)]", "= namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr =", "\"\"\" import pyarrow as pa message = pa.read_message(buf) rb =", "'DECIMAL': 'real', 'DOUBLE': 'real', 'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES", "(base + datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val = seconds_to_time(val)", "reader.to_dict().items(): df[k] = v return df def _bind_parameters(operation, parameters): return", "None, None, None, col.col_type.nullable) for col in row_desc] def _extract_column_details(row_desc):", "_is_columnar(data): # type: (T.TQueryResult) -> bool return data.row_set.is_columnar def _load_schema(buf):", "v is None else (base + 
datetime.timedelta(seconds=v)).date() for v in", "row_desc ] def _is_columnar(data): # type: (T.TQueryResult) -> bool return", "as T from ._utils import seconds_to_time Description = namedtuple(\"Description\", [\"name\",", "x.col_type.comp_param) for x in row_desc ] def _is_columnar(data): # type:", "ipc_handle, size=tdf.df_size) ctx = cuda.current_context() dptr = ipch.open(ctx) schema_buffer =", "(base + datetime.timedelta(seconds=v)).date() for v in vals] elif typename ==", "\"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr = { 'SMALLINT': 'int', 'INT':", "\"null_ok\"]) ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"])", "_load_schema(buf): \"\"\" Load a `pyarrow.Schema` from a buffer written to", "import namedtuple from sqlalchemy import text import mapd.ttypes as T", "---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df", "ColumnDetails = namedtuple(\"ColumnDetails\", [\"name\", \"type\", \"nullable\", \"precision\", \"scale\", \"comp_param\"]) _typeattr", "v in reader.to_dict().items(): df[k] = v return df def _bind_parameters(operation,", "def _load_schema(buf): \"\"\" Load a `pyarrow.Schema` from a buffer written", "GpuArrowReader from pygdf.dataframe import DataFrame from numba import cuda from", "+ '_val') base = datetime.datetime(1970, 1, 1) if typename ==", "v in zip(nulls, vals)] base = datetime.datetime(1970, 1, 1) if", "typename == 'TIME': val = seconds_to_time(val) return val def _extract_col_vals(desc,", "\"\"\" Return a tuple of (name, type_code, display_size, internal_size, precision,", "+ datetime.timedelta(seconds=val)) elif typename == 'DATE': val = (base +", "null, v in zip(nulls, vals)] base = datetime.datetime(1970, 1, 1)", "datetime.timedelta(seconds=val)) elif typename == 'DATE': val = (base + datetime.timedelta(seconds=val)).date()", "vals] elif typename == 'TIME': vals = [None if v", "+ datetime.timedelta(seconds=v) for v in 
vals] elif typename == 'DATE':", "# type: (T.TColumnType, T.TColumn) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] nulls", "\"\"\" Parse the results of a select ipc_gpu into a", "`pyarrow.Schema` from a buffer written to shared memory Parameters ----------", "a buffer written to shared memory Parameters ---------- buf :", "for v in vals] return vals def _extract_description(row_desc): # type:", "type: (T.TColumnType, T.TDatum) -> Any typename = T.TDatumType._VALUES_TO_NAMES[desc.col_type.type] if val.is_null:", "x.col_type.scale, x.col_type.comp_param) for x in row_desc ] def _is_columnar(data): #", "pygdf.gpuarrow import GpuArrowReader from pygdf.dataframe import DataFrame from numba import", "elif typename == 'DATE': vals = [None if v is", "ipch.open(ctx) schema_buffer = load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. schema_buffer", "from numba import cuda from numba.cuda.cudadrv import drvapi from .shm", "reader = GpuArrowReader(schema_buffer, darr) df = DataFrame() for k, v", "df[k] = v return df def _bind_parameters(operation, parameters): return (text(operation)", "'STR': 'str', } _thrift_types_to_values = T.TDatumType._NAMES_TO_VALUES _thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def", "is None else base + datetime.timedelta(seconds=v) for v in vals]", "Parameters ---------- tdf : TDataFrame Returns ------- gdf : GpuDataFrame", "= val.nulls vals = getattr(val.data, _typeattr[typename] + '_col') vals =", "= pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse the", ": GpuDataFrame \"\"\" import numpy as np from pygdf.gpuarrow import", "'BIGINT': 'int', 'TIME': 'int', 'TIMESTAMP': 'int', 'DATE': 'int', 'BOOL': 'int',", "\"\"\" import datetime from collections import namedtuple from sqlalchemy import", "val.nulls vals = getattr(val.data, _typeattr[typename] + '_col') vals = [None", "None, None, col.col_type.nullable) for col in row_desc] def 
_extract_column_details(row_desc): #", "_thrift_values_to_types = T.TDatumType._VALUES_TO_NAMES def _extract_row_val(desc, val): # type: (T.TColumnType, T.TDatum)", "v for null, v in zip(nulls, vals)] base = datetime.datetime(1970,", "= [None if v is None else seconds_to_time(v) for v", ": pyarrow.Buffer shcema : pyarrow.Schema Returns ------- df : pandas.DataFrame", "== 'TIMESTAMP': vals = [None if v is None else", "Returns ------- df : pandas.DataFrame \"\"\" import pyarrow as pa", "memory Parameters ---------- buf : pyarrow.Buffer shcema : pyarrow.Schema Returns", "ipc_gpu into a GpuDataFrame Parameters ---------- tdf : TDataFrame Returns", "= (base + datetime.timedelta(seconds=val)) elif typename == 'DATE': val =", "typename == 'TIMESTAMP': val = (base + datetime.timedelta(seconds=val)) elif typename", "return vals def _extract_description(row_desc): # type: (List[T.TColumnType]) -> List[Description] \"\"\"", "pyarrow as pa message = pa.read_message(buf) rb = pa.read_record_batch(message, schema)", "return [Description(col.col_name, col.col_type.type, None, None, None, None, col.col_type.nullable) for col", "_typeattr[typename] + '_val') base = datetime.datetime(1970, 1, 1) if typename", "pa.read_record_batch(message, schema) return rb.to_pandas() def _parse_tdf_gpu(tdf): \"\"\" Parse the results", "load_buffer(tdf.sm_handle, tdf.sm_size) # TODO: extra copy. schema_buffer = np.frombuffer(schema_buffer.to_pybytes(), dtype=np.uint8)", "+ datetime.timedelta(seconds=val)).date() elif typename == 'TIME': val = seconds_to_time(val) return", "x in row_desc ] def _is_columnar(data): # type: (T.TQueryResult) ->", "------- df : pandas.DataFrame \"\"\" import pyarrow as pa message" ]
[ "of time column in the dataframe. secondary_time_index (dict[str -> str]):", "inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v", "column in the dataframe. secondary_time_index (dict[str -> str]): Dictionary mapping", "was used instead\".format(vtype)) if index not in variable_types: variable_types[index] =", "else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type {} was unrecognized, Unknown", "is None: self.entityset.time_type = time_type elif self.entityset.time_type != time_type: raise", "= vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object):", "self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if", "for v in self.variables: repr_out += u\"\\n {} (dtype: {})\".format(v.id,", "None \"\"\" for variable in self.variables: # some heuristics to", "(ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else: _v =", "a relational database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def", "dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum() - 1 elif", "Raises: RuntimeError : if no variable exist with provided id", "from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal", "time_type)) if time_index not in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index", "def _create_index(index, make_index, df): '''Handles index creation logic base on", "vtypes.Index, convert_data=False) self.index = variable_id def set_secondary_time_index(self, secondary_time_index): for time_index,", "no errors or warnings # 
(Case 4 also uses this", "0.95: if verbose: msg = \"Variable {}: Marking {} as", "count, # and add interesting values to each variable total_count", "to types (:class:`.Variable`) or type_strings (str) or (type, kwargs) to", "other, deep=False): if self.index != other.index: return False if self.time_index", "index][0] self.variables = [index_variable] + [v for v in variables", "optional) : If True, assume index does not exist as", "unique: assert self.df.index.is_unique, \"Index is not unique on dataframe \"", "skip = False for r in self.entityset.relationships: if variable in", "= 1 df[index] = df[index].cumsum() - 1 elif is_instance(df, ks,", "= None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out", "is %s type which differs from\" \" other entityset time", "self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get variable instance Args: variable_id (str)", "(0, len(dataframe)). Otherwise, assume index exists in dataframe. 
\"\"\" _validate_entity_params(id,", "if v.id == index][0] self.variables = [index_variable] + [v for", "= self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v = self._get_variable(v_id)", "Entity inputs''' assert isinstance(id, str), \"Entity id must be a", "= created_index self._verbose = verbose secondary_time_index = secondary_time_index or {}", "RuntimeError : if no variable exist with provided id \"\"\"", "counts = self.df[variable.id].value_counts() # find how many of each unique", "variable_types[vid] if isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid] =", "once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure", "specified but no index supplied, use first column warnings.warn((\"Using first", "== len(set(df.columns)), \"Duplicate column names\" for c in df.columns: if", "convert underlying data in the EntitySet. Raises: RuntimeError : Raises", "strings (Column {} \" \"is not a string)\".format(c)) if time_index", "= self.shape repr_out += u\"\\n Shape:\\n (Rows: {}, Columns: {})\".format(", "return {v.id: type(v) for v in self.variables} def convert_variable_type(self, variable_id,", "self.data.values()]) @property def df(self): '''Dataframe providing the data for the", "each instance across all child entities. make_index (bool, optional) :", "a list of variable names') if len(variable_ids) == 0: return", "interesting_values if it represents more than # 25% of the", "if is_instance(self.df, (dd, ks), 'DataFrame'): t = time_type # skip", "%s not found in entity\" % (variable_id)) @property def variable_types(self):", "use first column warnings.warn((\"Using first column as index. \" \"To", "repr_out += u\" Variables:\" for v in self.variables: repr_out +=", "are associated with. 
last_time_index (pd.Series): Time index of the last", "sort if not already_sorted: # sort by time variable, then", "not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find", "% (self.id, time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'): t =", "self.index = variable_id def set_secondary_time_index(self, secondary_time_index): for time_index, columns in", "dict maps string variable ids to types (:class:`.Variable`) or type_strings", "an existing variable to set as index. unique (bool) :", "isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check =", "df(self): '''Dataframe providing the data for the entity.''' return self.data[\"df\"]", "as index. \" \"To change this, specify the index parameter\"))", "(type, kwargs) to pass keyword arguments to the Variable. index", "len(df.columns) != len(self.variables): raise ValueError(\"Updated dataframe contains {} columns, expecting", "time_index (str): Name of time column in the dataframe. 
secondary_time_index", "inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs) _v", "# (Case 4 also uses this code path) if isinstance(df,", "warn warnings.warn(\"index {} not found in dataframe, creating new \"", "self.entityset.time_type != time_type: raise TypeError(\"%s time index is %s type", "idx = counts.index[i] # add the value to interesting_values if", "self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id) repr_out", "there are; sort by count, # and add interesting values", "dataframe is missing new {} column\".format(v.id)) # Make sure column", "values for categorical variables, to be used to generate \"where\"", "and other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index): return False", "_validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs''' assert isinstance(id,", "df.columns: if not isinstance(c, str): raise ValueError(\"All column names must", "not in df.columns: raise ValueError(\"Updated dataframe is missing new {}", "from featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas') logger =", "variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): #", "stores relevant metadata and data An Entity is analogous to", "variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str): if vtype in", "constraints # don't add interesting values for entities in relationships", "as dd import numpy as np import pandas as pd", "does not exist as a column in dataframe, and create", "repr_out = u\"Entity: {}\\n\".format(self.id) repr_out += u\" Variables:\" for v", "variable in self.variables} for variable in other.variables: variables[variable] += (variable,", "\"\"\" Create Entity Args: id (str): Id 
of Entity. df", "errors or warnings # (Case 4 also uses this code", "variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type {}", "id (str): Id of Entity. df (pd.DataFrame): Dataframe providing the", "relational database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self,", "type {} was unrecognized, Unknown variable type was used instead\".format(vtype))", "on user input''' created_index = None if index is None:", "other.index: return False if self.time_index != other.time_index: return False if", "convert_data: # first, convert the underlying data (or at least", "if isinstance(df, dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum() -", "column and warn warnings.warn(\"index {} not found in dataframe, creating", "Maximum number of values per variable to add. verbose (bool)", "Type of variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet", "verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index)", "in df.columns: if not make_index: # Case 4: user names", "!= other.time_index: return False if self.secondary_time_index != other.secondary_time_index: return False", "or warnings # (Case 4 also uses this code path)", "warnings.warn(\"index {} not found in dataframe, creating new \" \"integer", "return False if deep: if self.last_time_index is None and other.last_time_index", "time \"\"\" variables = [] variable_types = variable_types.copy() or {}", "+= (variable, ) for self_var, other_var in variables.values(): if not", "unrecognized, Unknown variable type was used instead\".format(vtype)) if index not", "If True, convert underlying data in the EntitySet. Raises: RuntimeError", "(pd.DataFrame): Dataframe providing the data for the entity. 
entityset (EntitySet):", "# check time type if not isinstance(self.df, pd.DataFrame) or self.df.empty:", "variable_ids (list[str]): Variables to delete Returns: None \"\"\" # check", "is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else: _v", "t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use", "index supplied, use first column warnings.warn((\"Using first column as index.", "this entity. convert_data (bool) : If True, convert underlying data", "(dict[str -> str]): Dictionary mapping columns in the dataframe to", "is already in df. No action needed. return created_index, index,", "not exist as a column in dataframe, and create a", "a table in a relational database See Also: :class:`.Relationship`, :class:`.Variable`,", "variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description()", "recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making sure data is", "index_variable = [v for v in variables if v.id ==", "''' Time index of the last event for each instance", "of Entity. df (pd.DataFrame): Dataframe providing the data for the", "import import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types", "in df.columns: raise ValueError(\"Updated dataframe is missing new {} column\".format(v.id))", "a list of columns that depend on that secondary time", "= [v for v in variables if v.id == index][0]", "vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this", "it cannot convert the underlying data Examples: >>> from featuretools.tests.testing_utils", "name using integers the (0, len(dataframe)). 
Otherwise, assume index exists", "variable in other.variables: variables[variable] += (variable, ) for self_var, other_var", "True def __sizeof__(self): return sum([value.__sizeof__() for value in self.data.values()]) @property", "time_index) created_index, index, df = _create_index(index, make_index, df) self.id =", "specify # make_index. Make new index column and warn warnings.warn(\"index", "column names must be strings (Column {} \" \"is not", "vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an entity in a Entityset, and", "mapping columns in the dataframe to the time index column", "underlying data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es", "if index is None: # Case 1: user wanted to", "interesting values found. Returns: None \"\"\" for variable in self.variables:", "of `Variable`) : Type of variable to convert to. entityset", "raise RuntimeError(\"Cannot make index: index variable already present\") elif index", "already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id])", "\"\"\" _validate_entity_params(id, df, time_index) created_index, index, df = _create_index(index, make_index,", "None): Name of time_index column secondary_time_index (dict[str: [str]]): Dictionary of", "warnings.warn((\"Using first column as index. \" \"To change this, specify", "**kwargs) # replace the old variable with the new one,", "in the EntitySet. 
Raises: RuntimeError : Raises if it cannot", "the values we have not seen so far if len(counts.index)", "different type Args: variable_id (str) : Id of variable to", "return False if self.secondary_time_index != other.secondary_time_index: return False if len(self.variables)", "secondary_time_index (dict[str -> str]): Dictionary mapping columns in the dataframe", "index # Case 6: user specified index, which is already", "shape[0], shape[1]) return repr_out @property def shape(self): '''Shape of the", "with. last_time_index (pd.Series): Time index of the last event for", "= {variable: (variable, ) for variable in self.variables} for variable", "@property def df(self): '''Dataframe providing the data for the entity.'''", "secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\" Create Entity Args: id", "not self_var.__eq__(other_var, deep=True): return False return True def __sizeof__(self): return", "variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in", "if variable in [r.child_variable, r.parent_variable]: skip = True break if", "variables if v.id != index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True):", "def add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find interesting values for categorical", "t = vtypes.DatetimeTimeIndex # use stable sort if not already_sorted:", "% (self.id)) if self.entityset.time_type != time_type: raise TypeError(\"%s time index", "self.variables: if v.id == variable_id: return v raise KeyError(\"Variable: %s", "Dataframe providing the data for the entity. 
entityset (EntitySet): Entityset", "variable in [r.child_variable, r.parent_variable]: skip = True break if skip:", "use stable sort if not already_sorted: # sort by time", "the underlying data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>>", "v in self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index,", "def _validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs''' assert", "also uses this code path) if isinstance(df, dd.DataFrame): df[index] =", "if len(self.variables) != len(other.variables): return False if set(self.variables) != set(other.variables):", "= string_to_class_map['unknown'] warnings.warn(\"Variable type {} was unrecognized, Unknown variable type", "self.variables: # some heuristics to find basic 'where'-able variables if", "self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None:", "last event for each instance across all child entities. '''", "each instance across all child entities. ''' return self.data[\"last_time_index\"] @last_time_index.setter", "if self.entityset.time_type != time_type: raise TypeError(\"%s time index is %s", "Create Entity Args: id (str): Id of Entity. df (pd.DataFrame):", "= self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise", "if time_type is None: raise TypeError(\"%s time index not recognized", "new index column and warn warnings.warn(\"index {} not found in", "@property def variable_types(self): '''Dictionary mapping variable id's to variable types'''", "type/str/dict[str -> type]]) : An entity's variable_types dict maps string", "per variable to add. 
verbose (bool) : If True, print", "found in dataframe, creating new \" \"integer column\".format(index)) # Case", "time_index column secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns", "import warnings import dask.dataframe as dd import numpy as np", "!= index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal", "= self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check time", "id column in the dataframe. time_index (str): Name of time", "created_index = index # Case 6: user specified index, which", "_categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an", "== index][0] self.variables = [index_variable] + [v for v in", "drop=False) self.df.index.name = None if unique: assert self.df.index.is_unique, \"Index is", "by index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index", "time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is None:", "in variables if v.id == index][0] self.variables = [index_variable] +", "variables from a dataframe Args: variable_types (dict[str -> types/str/dict[str ->", "Variable. 
index (str): Name of id column in the dataframe.", "add interesting values for entities in relationships skip = False", "\" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id", "with no errors or warnings # (Case 4 also uses", "type which differs from\" \" other entityset time indexes\" %", "must be a list of variable names') if len(variable_ids) ==", "{} was unrecognized, Unknown variable type was used instead\".format(vtype)) if", "as pd from featuretools import variable_types as vtypes from featuretools.utils.entity_utils", "time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\" Create Entity Args:", "(:class:`.BaseEntitySet`) : EntitySet associated with this entity. convert_data (bool) :", "False if deep: if self.last_time_index is None and other.last_time_index is", "u\"\\n {} (dtype: {})\".format(v.id, v.type_string) shape = self.shape repr_out +=", "code path) if isinstance(df, dd.DataFrame): df[index] = 1 df[index] =", "col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none,", "\"Entity id must be a string\" assert len(df.columns) == len(set(df.columns)),", "make_index. 
Make new index column and warn warnings.warn(\"index {} not", "columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index, make_index, df): '''Handles index", "to interesting_values if it represents more than # 25% of", "# convert data once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types)", "self.data[\"df\"] @df.setter def df(self, _df): self.data[\"df\"] = _df @property def", "= {'df': df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose =", "as a column in dataframe, and create a new column", "new_type (subclass of `Variable`) : Type of variable to convert", "inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is", "return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id):", "instance Args: variable_id (str) : Id of variable to get.", "delete_variables(self, variable_ids): \"\"\" Remove variables from entity's dataframe and from", "{} (dtype: {})\".format(v.id, v.type_string) shape = self.shape repr_out += u\"\\n", "in df.columns: # Case 3: user wanted to make index", "self.id = id self.entityset = entityset self.data = {'df': df,", "names\" for c in df.columns: if not isinstance(c, str): raise", "value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx]", "secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO document how", "get. Returns: :class:`.Variable` : Instance of variable. Raises: RuntimeError :", "fraction > 0.05 and fraction < 0.95: if verbose: msg", "**vtype[1]) else: _v = inferred_variable_types[v](v, self) variables += [_v] #", "a Entityset, and stores relevant metadata and data An Entity", "to assert that the index is unique. 
\"\"\" if isinstance(self.df,", "(Case 4 also uses this code path) if isinstance(df, dd.DataFrame):", "[_v] # convert data once we've inferred self.df = convert_all_variable_data(df=self.df,", "variable_types, index, time_index, secondary_time_index): \"\"\"Extracts the variables from a dataframe", "instance across all child entities. make_index (bool, optional) : If", "id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get", "new one, maintaining order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable)", "Shape:\\n (Rows: {}, Columns: {})\".format( shape[0], shape[1]) return repr_out @property", "{} as an \" msg += \"interesting value\" logger.info(msg.format(variable.id, idx))", "the entity. entityset (EntitySet): Entityset for this Entity. variable_types (dict[str", "types (:class:`.Variable`) or type_strings (str) or (type, kwargs) to pass", "in range(min(max_values, len(counts.index))): idx = counts.index[i] # add the value", "wanted to make index but did not specify column name", "self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"] = lti def __hash__(self):", "specify the index parameter\")) index = df.columns[0] elif make_index and", "ValueError(\"All column names must be strings (Column {} \" \"is", "return self.df.shape def __eq__(self, other, deep=False): if self.index != other.index:", "self.secondary_time_index != other.secondary_time_index: return False if len(self.variables) != len(other.variables): return", "index, which is already in df. No action needed. 
return", "if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1) for", "time_index not in df.columns: raise LookupError('Time index not found in", "{variable: (variable, ) for variable in self.variables} for variable in", "variable_id): \"\"\"Get variable instance Args: variable_id (str) : Id of", "np import pandas as pd from featuretools import variable_types as", "False for r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]:", "# skip checking values already_sorted = True # skip sorting", "column name assert not make_index, \"Must specify an index name", "does not specify # make_index. Make new index column and", "# vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1])", "v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check", "vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if time_type", "list of columns that depend on that secondary time \"\"\"", "dict maps string variable ids to types (:class:`.Variable`) or type_string", "stable sort if not already_sorted: # sort by time variable,", "{}, Columns: {})\".format( shape[0], shape[1]) return repr_out @property def shape(self):", "the entity's dataframe''' return self.df.shape def __eq__(self, other, deep=False): if", "dataframe and from self.variables Args: variable_ids (list[str]): Variables to delete", "for categorical variables, to be used to generate \"where\" clauses", "Entityset, and stores relevant metadata and data An Entity is", "+= u\" Variables:\" for v in self.variables: repr_out += u\"\\n", "or\" \" datetime\" % (self.id)) if self.entityset.time_type != time_type: raise", "self.last_time_index is not None and other.last_time_index is not None: if", "in self.data.values()]) @property def df(self): '''Dataframe providing the data for", 
"time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError(\"%s time", "{} string_to_class_map = find_variable_types() # TODO: Remove once Text has", "id, df, entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False,", "else: df.insert(0, index, range(len(df))) created_index = index # Case 6:", "self.data = {'df': df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose", "action needed. return created_index, index, df def _validate_entity_params(id, df, time_index):", "some heuristics to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete):", "time_index (str or None): Name of time_index column secondary_time_index (dict[str:", "number of values per variable to add. verbose (bool) :", "vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index,", "if self.last_time_index is None and other.last_time_index is not None: return", "variable_id (str) : Id of variable to get. Returns: :class:`.Variable`", "to other entities are consistent, and last_time_indexes are consistent. '''", "wanted to make index but column already exists raise RuntimeError(\"Cannot", "_get_variable(self, variable_id): \"\"\"Get variable instance Args: variable_id (str) : Id", "of an existing variable to set as index. unique (bool)", "and other.last_time_index is not None: return False elif self.last_time_index is", "not found in dataframe, creating new \" \"integer column\".format(index)) #", "using integers the (0, len(dataframe)). Otherwise, assume index exists in", "None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out =", "the (0, len(dataframe)). Otherwise, assume index exists in dataframe. \"\"\"", "integers the (0, len(dataframe)). 
Otherwise, assume index exists in dataframe.", "for time_index, columns in secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame')", "isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if", "dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v,", "created_index, index, df = _create_index(index, make_index, df) self.id = id", "self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check time type", "dd import numpy as np import pandas as pd from", "add interesting values to each variable total_count = np.sum(counts) counts[:]", "r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip =", "column ordering matches variable ordering self.df = df[[v.id for v", "# check if variable is not a list if not", "find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype)", "\"Index is not unique on dataframe \" \\ \"(Entity {})\".format(self.id)", "(EntitySet): Entityset for this Entity. 
variable_types (dict[str -> type/str/dict[str ->", "\" msg += \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx]))", "from entity's dataframe and from self.variables Args: variable_ids (list[str]): Variables", "break self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\" Remove variables from entity's", "data once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make", "reference indexes to other entities are consistent, and last_time_indexes are", "convert_data=False) self.index = variable_id def set_secondary_time_index(self, secondary_time_index): for time_index, columns", ") from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type,", "last_time_indexes are consistent. ''' if len(df.columns) != len(self.variables): raise ValueError(\"Updated", "heuristics to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values", "counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx = counts.index[i] #", "index, range(len(df))) created_index = index # Case 6: user specified", "%s type which differs from\" \" other entityset time indexes\"", "not None: if not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df,", "len(self.variables): raise ValueError(\"Updated dataframe contains {} columns, expecting {}\".format(len(df.columns), len(self.variables)))", "axis=1) for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def", "tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is", "def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get variable", "else: t = vtypes.NumericTimeIndex if 
col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex #", "variable_id (str) : Id of variable to convert. new_type (subclass", "None: # Case 1: user wanted to make index but", "find how many of each unique value there are; sort", "self.secondary_time_index = secondary_time_index def _create_index(index, make_index, df): '''Handles index creation", "return repr_out @property def shape(self): '''Shape of the entity's dataframe'''", "an index name if make_index is True\" # Case 2:", "used to generate \"where\" clauses Args: max_values (int) : Maximum", "= variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if fraction", "make index but column already exists raise RuntimeError(\"Cannot make index:", "\"Must specify an index name if make_index is True\" #", "= False for r in self.entityset.relationships: if variable in [r.child_variable,", "= [index_variable] + [v for v in variables if v.id", "variable type was used instead\".format(vtype)) if index not in variable_types:", "at the beginning index_variable = [v for v in variables", "relevant metadata and data An Entity is analogous to a", "try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace", "type Args: variable_id (str) : Id of variable to convert.", "6: user specified index, which is already in df. No", "is not None and time_index not in df.columns: raise LookupError('Time", "add. 
verbose (bool) : If True, print summary of interesting", "if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] =", "# TODO: Remove once Text has been removed from variable", "= variable_id def set_secondary_time_index(self, secondary_time_index): for time_index, columns in secondary_time_index.items():", "for entities in relationships skip = False for r in", "index, time_index, secondary_time_index): \"\"\"Extracts the variables from a dataframe Args:", "on dataframe \" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index", "of the last event for each instance across all child", "find_variable_types() # TODO: Remove once Text has been removed from", "have not seen so far if len(counts.index) < 25: if", "variables from entity's dataframe and from self.variables Args: variable_ids (list[str]):", "columns that each map to a list of columns that", "in variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types =", "in secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty: time_to_check", "used instead\".format(vtype)) if index not in variable_types: variable_types[index] = vtypes.Index", "None and time_index not in df.columns: raise LookupError('Time index not", "column\".format(index)) # Case 5: make_index with no errors or warnings", "delete Returns: None \"\"\" # check if variable is not", "secondary_time_index): for time_index, columns in secondary_time_index.items(): if is_instance(self.df, (dd, ks),", "df, time_index): '''Validation checks for Entity inputs''' assert isinstance(id, str),", "variable ordering self.df = df[[v.id for v in self.variables]] self.set_index(self.index)", "or\" \" datetime\" % (self.id)) if self.entityset.time_type is None: self.entityset.time_type", "data (or at least try to) self.df = convert_variable_data(df=self.df, 
column_id=variable_id,", "= self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique: assert self.df.index.is_unique,", "Returns: None \"\"\" # check if variable is not a", "of variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated", "Marking {} as an \" msg += \"interesting value\" logger.info(msg.format(variable.id,", "if isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype]", "string variable ids to types (:class:`.Variable`) or type_strings (str) or", "[str]]): Dictionary of secondary time columns that each map to", "time type if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check =", "not in df. does not specify # make_index. Make new", "self.entityset.time_type = time_type elif self.entityset.time_type != time_type: raise TypeError(\"%s time", "+= \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count", "Unknown variable type was used instead\".format(vtype)) if index not in", "or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type", "to the Variable. index (str): Name of id column in", "in entity\" % (variable_id)) @property def variable_types(self): '''Dictionary mapping variable", ": Instance of variable. 
Raises: RuntimeError : if no variable", "self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the old", "instead\".format(vtype)) if index not in variable_types: variable_types[index] = vtypes.Index link_vars", "{}: Marking {} as an \" msg += \"interesting value\"", "\"\"\" for variable in self.variables: # some heuristics to find", "self.last_time_index is None and other.last_time_index is not None: return False", "TODO - consider removing this constraints # don't add interesting", "this code path) if isinstance(df, dd.DataFrame): df[index] = 1 df[index]", "new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index,", "create a new column of that name using integers the", "the Variable. index (str): Name of id column in the", "time_index, columns in secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame') or", "in the dataframe. secondary_time_index (dict[str -> str]): Dictionary mapping columns", "if len(counts.index) < 25: if verbose: msg = \"Variable {}:", "string_to_class_map['unknown'] warnings.warn(\"Variable type {} was unrecognized, Unknown variable type was", "in df. No action needed. return created_index, index, df def", "time_index, secondary_time_index) self.df = df[[v.id for v in self.variables]] self.set_index(index)", "Name of time column in the dataframe. secondary_time_index (dict[str ->", "self.variables} for variable in other.variables: variables[variable] += (variable, ) for", "with this entity. convert_data (bool) : If True, convert underlying", "variable already present\") elif index not in df.columns: if not", "from\" \" other entityset time indexes\" % (self.id, time_type)) if", "an \" msg += \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values =", "# Case 4: user names index, it is not in", "set as index. 
unique (bool) : Whether to assert that", "underlying data (or at least try to) self.df = convert_variable_data(df=self.df,", "for v in self.variables]] self.set_index(self.index) if self.time_index is not None:", "not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check", "Id of Entity. df (pd.DataFrame): Dataframe providing the data for", "vtype can be tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple):", "creating new \" \"integer column\".format(index)) # Case 5: make_index with", "of time_index column secondary_time_index (dict[str: [str]]): Dictionary of secondary time", "is not None: if not self.last_time_index.equals(other.last_time_index): return False if not", "time_type elif self.entityset.time_type != time_type: raise TypeError(\"%s time index is", "\" other entityset time indexes\" % (self.id, time_type)) if is_instance(self.df,", "[vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an entity in", "string_to_class_map = find_variable_types() # TODO: Remove once Text has been", "index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\" Create Entity", "if isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs) _v =", "inferred_variable_types[v](v, self) variables += [_v] # convert data once we've", "isinstance(variable_ids, list): raise TypeError('variable_ids must be a list of variable", "-> type]]) : An entity's variable_types dict maps string variable", "else: _v = inferred_variable_types[v](v, self) variables += [_v] # convert", "time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id)", "df, entityset, variable_types=None, 
index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False):", "variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO", "(pd.Series): Time index of the last event for each instance", "user specified index, which is already in df. No action", "in variables if v.id != index] def update_data(self, df, already_sorted=False,", "return self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get variable instance Args: variable_id", "unique on dataframe \" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False)", "can be tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple): #", "self) variables += [_v] # convert data once we've inferred", "datetime\" % (self.id)) if self.entityset.time_type != time_type: raise TypeError(\"%s time", "not in df.columns: if not make_index: # Case 4: user", "\"\"\" Args: variable_id (string) : Name of an existing variable", "# TODO - consider removing this constraints # don't add", "if v.id != index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update", "max_values (int) : Maximum number of values per variable to", "to generate \"where\" clauses Args: max_values (int) : Maximum number", "u\" Variables:\" for v in self.variables: repr_out += u\"\\n {}", "assert that the index is unique. \"\"\" if isinstance(self.df, pd.DataFrame):", "len(self.variables) != len(other.variables): return False if set(self.variables) != set(other.variables): return", "df. No action needed. 
return created_index, index, df def _validate_entity_params(id,", "a list if not isinstance(variable_ids, list): raise TypeError('variable_ids must be", "= True break if skip: continue counts = self.df[variable.id].value_counts() #", "other entityset time indexes\" % (self.id, time_type)) if time_index not", "variable to convert. new_type (subclass of `Variable`) : Type of", "= variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description() def", "vid in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str): if", "secondary_time_index) self.df = df[[v.id for v in self.variables]] self.set_index(index) self.time_index", "None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find interesting", "which is already in df. No action needed. return created_index,", "infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types:", "\"To change this, specify the index parameter\")) index = df.columns[0]", "from a dataframe Args: variable_types (dict[str -> types/str/dict[str -> type]])", "variable_ids): \"\"\" Remove variables from entity's dataframe and from self.variables", "internal dataframe, optionaly making sure data is sorted, reference indexes", "variable names') if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids,", "isinstance(id, str), \"Entity id must be a string\" assert len(df.columns)", "elif self.last_time_index is not None and other.last_time_index is None: return", "break if skip: continue counts = self.df[variable.id].value_counts() # find how", "checking values already_sorted = True # skip sorting else: t", "\"Duplicate column names\" for c in df.columns: if not isinstance(c,", "(dd, ks), 'DataFrame'): t = 
time_type # skip checking values", "(self.id)) if self.entityset.time_type is None: self.entityset.time_type = time_type elif self.entityset.time_type", "as numeric or\" \" datetime\" % (self.id)) if self.entityset.time_type is", "columns that depend on that secondary time \"\"\" variables =", "a string\" assert len(df.columns) == len(set(df.columns)), \"Duplicate column names\" for", "the underlying data (or at least try to) self.df =", ">>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data: # first, convert the", "for v in inferred_variable_types: # TODO document how vtype can", "+= u\"\\n Shape:\\n (Rows: {}, Columns: {})\".format( shape[0], shape[1]) return", "25% of the values we have not seen so far", "secondary time \"\"\" variables = [] variable_types = variable_types.copy() or", "it represents more than # 25% of the values we", "(str): Name of time column in the dataframe. secondary_time_index (dict[str", "!= set(other.variables): return False if deep: if self.last_time_index is None", "if self.time_index != other.time_index: return False if self.secondary_time_index != other.secondary_time_index:", "self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id", "index column they are associated with. last_time_index (pd.Series): Time index", "skip sorting else: t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t =", "index parameter\")) index = df.columns[0] elif make_index and index in", "Whether to assert that the index is unique. \"\"\" if", "dataframe to the time index column they are associated with.", "seen so far if len(counts.index) < 25: if verbose: msg", "for each instance across all child entities. make_index (bool, optional)", "replace the old variable with the new one, maintaining order", "-> types/str/dict[str -> type]]) : An entity's variable_types dict maps", "variable. 
Raises: RuntimeError : if no variable exist with provided", "(dict[str: [str]]): Dictionary of secondary time columns that each map", "len(self.variables))) for v in self.variables: if v.id not in df.columns:", "base on user input''' created_index = None if index is", "pd from featuretools import variable_types as vtypes from featuretools.utils.entity_utils import", "to be used to generate \"where\" clauses Args: max_values (int)", "of secondary time columns that each map to a list", "make_index not specified but no index supplied, use first column", "skip: continue counts = self.df[variable.id].value_counts() # find how many of", "def __sizeof__(self): return sum([value.__sizeof__() for value in self.data.values()]) @property def", "but did not specify column name assert not make_index, \"Must", "counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx =", "dataframe contains {} columns, expecting {}\".format(len(df.columns), len(self.variables))) for v in", "pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints # don't", "to find basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values =", "2: make_index not specified but no index supplied, use first", "if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes", "df): '''Handles index creation logic base on user input''' created_index", "# replace the old variable with the new one, maintaining", "= counts.index[i] # add the value to interesting_values if it", "lti def __hash__(self): return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id)", "Make sure column ordering matches variable ordering self.df = df[[v.id", "represents more than # 25% of the values we have", "(str) : Id of variable to convert. 
new_type (subclass of", "{} column\".format(v.id)) # Make sure column ordering matches variable ordering", "df) self.id = id self.entityset = entityset self.data = {'df':", "data in the EntitySet. Raises: RuntimeError : Raises if it", "new_type=new_type, **kwargs) # replace the old variable with the new", "add the value to interesting_values if it represents more than", "counts.index[i] # add the value to interesting_values if it represents", "assume index does not exist as a column in dataframe,", "is not None and other.last_time_index is not None: if not", "= find_variable_types() # TODO: Remove once Text has been removed", "= variable_types[vid] if isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid]", "None if unique: assert self.df.index.is_unique, \"Index is not unique on", "index: index variable already present\") elif index not in df.columns:", "None and other.last_time_index is not None: return False elif self.last_time_index", "None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not", "self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v =", "in a Entityset, and stores relevant metadata and data An", "self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id) repr_out += u\"", "self.variables Args: variable_ids (list[str]): Variables to delete Returns: None \"\"\"", "RuntimeError : Raises if it cannot convert the underlying data", "in dataframe, creating new \" \"integer column\".format(index)) # Case 5:", "entity in a Entityset, and stores relevant metadata and data", "ks), 'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check =", "= time_type elif self.entityset.time_type != time_type: raise TypeError(\"%s time index", "False variables 
= {variable: (variable, ) for variable in self.variables}", "0: return self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids:", "is sorted, reference indexes to other entities are consistent, and", "if unique: assert self.df.index.is_unique, \"Index is not unique on dataframe", "other entities are consistent, and last_time_indexes are consistent. ''' if", "types''' return {v.id: type(v) for v in self.variables} def convert_variable_type(self,", "featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical)", "{} not found in dataframe, creating new \" \"integer column\".format(index))", "time_index is not None and time_index not in df.columns: raise", "type]]) : An entity's variable_types dict maps string variable ids", "how many of each unique value there are; sort by", "lti): self.data[\"last_time_index\"] = lti def __hash__(self): return id(self.id) def __getitem__(self,", "no index supplied, use first column warnings.warn((\"Using first column as", "variables = {variable: (variable, ) for variable in self.variables} for", "already exists raise RuntimeError(\"Cannot make index: index variable already present\")", "indexes\" % (self.id, time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'): t", "the variables from a dataframe Args: variable_types (dict[str -> types/str/dict[str", "this, specify the index parameter\")) index = df.columns[0] elif make_index", "is missing new {} column\".format(v.id)) # Make sure column ordering", "def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly", "!= len(self.variables): raise ValueError(\"Updated dataframe contains {} columns, expecting {}\".format(len(df.columns),", "for vid in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str):", "not _dataframes_equal(self.df, other.df): 
return False variables = {variable: (variable, )", "time_type # skip checking values already_sorted = True # skip", "one, maintaining order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)]", "of variable to get. Returns: :class:`.Variable` : Instance of variable.", "= secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df =", "as an \" msg += \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values", "are; sort by count, # and add interesting values to", "already_sorted: # sort by time variable, then by index self.df", "other_var in variables.values(): if not self_var.__eq__(other_var, deep=True): return False return", "list of variable names') if len(variable_ids) == 0: return self.df", "many of each unique value there are; sort by count,", "for the entity.''' return self.data[\"df\"] @df.setter def df(self, _df): self.data[\"df\"]", "for Entity inputs''' assert isinstance(id, str), \"Entity id must be", "\"\"\" if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name =", "**kwargs): \"\"\"Convert variable in dataframe to different type Args: variable_id", "matches variable ordering self.df = df[[v.id for v in self.variables]]", "self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df): return False variables", "columns, expecting {}\".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id", "entity. entityset (EntitySet): Entityset for this Entity. variable_types (dict[str ->", "id \"\"\" for v in self.variables: if v.id == variable_id:", "of id column in the dataframe. 
time_index (str): Name of", "(str or None): Name of time_index column secondary_time_index (dict[str: [str]]):", "verbose (bool) : If True, print summary of interesting values", "Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self, id, df, entityset,", "self.df = df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index", "i in range(min(max_values, len(counts.index))): idx = counts.index[i] # add the", "df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index =", "uses this code path) if isinstance(df, dd.DataFrame): df[index] = 1", "if it cannot convert the underlying data Examples: >>> from", "inferred_variable_types: # TODO document how vtype can be tuple vtype", "deep=True): return False return True def __sizeof__(self): return sum([value.__sizeof__() for", "dask.dataframe as dd import numpy as np import pandas as", "ks), 'DataFrame'): t = time_type # skip checking values already_sorted", "!= len(other.variables): return False if set(self.variables) != set(other.variables): return False", "_check_time_type(time_to_check) if time_type is None: raise TypeError(\"%s time index not", "time_to_check = self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if time_type is None:", "from variable types string_to_class_map[Text.type_string] = Text for vid in variable_types.copy():", "and add interesting values to each variable total_count = np.sum(counts)", "total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values,", "self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError(\"%s", "vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents", "user wanted to make index but column already exists raise", 
"if self.secondary_time_index != other.secondary_time_index: return False if len(self.variables) != len(other.variables):", "for v in variables if v.id == index][0] self.variables =", "be tuple vtype = inferred_variable_types[v] if isinstance(vtype, tuple): # vtype", "already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making sure data", "deep=False): if self.index != other.index: return False if self.time_index !=", "import Text, find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types", "time column in the dataframe. secondary_time_index (dict[str -> str]): Dictionary", "not already_sorted: # sort by time variable, then by index", "be a list of variable names') if len(variable_ids) == 0:", "time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO document", "of variable to convert. new_type (subclass of `Variable`) : Type", "exists in dataframe. \"\"\" _validate_entity_params(id, df, time_index) created_index, index, df", "Id of variable to convert. new_type (subclass of `Variable`) :", "the beginning index_variable = [v for v in variables if", "v.id != index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's", "for v in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs):", "consistent, and last_time_indexes are consistent. ''' if len(df.columns) != len(self.variables):", "assume index exists in dataframe. 
\"\"\" _validate_entity_params(id, df, time_index) created_index,", ":class:`.EntitySet` \"\"\" def __init__(self, id, df, entityset, variable_types=None, index=None, time_index=None,", "= self.df[variable.id].value_counts() # find how many of each unique value", "to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the", "\"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def set_secondary_time_index(self,", "get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle", "the data for the entity. entityset (EntitySet): Entityset for this", "(Rows: {}, Columns: {})\".format( shape[0], shape[1]) return repr_out @property def", "arguments to the Variable. index (str): Name of id column", "vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v, self) variables +=", "and data An Entity is analogous to a table in", "Remove once Text has been removed from variable types string_to_class_map[Text.type_string]", "column as index. \" \"To change this, specify the index", "secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns that each", "names index, it is not in df. 
does not specify", "as np import pandas as pd from featuretools import variable_types", "last_time_index(self, lti): self.data[\"last_time_index\"] = lti def __hash__(self): return id(self.id) def", "Args: variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's", "variables[variable] += (variable, ) for self_var, other_var in variables.values(): if", "sure data is sorted, reference indexes to other entities are", "return False if self.time_index != other.time_index: return False if self.secondary_time_index", "value to interesting_values if it represents more than # 25%", "{} \" \"is not a string)\".format(c)) if time_index is not", "= df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'): df =", "Make new index column and warn warnings.warn(\"index {} not found", "time_index): '''Validation checks for Entity inputs''' assert isinstance(id, str), \"Entity", "the time index column they are associated with. last_time_index (pd.Series):", "index. \" \"To change this, specify the index parameter\")) index", "- 1 elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index)", "is None: # Case 1: user wanted to make index", "found in entity\" % (variable_id)) @property def variable_types(self): '''Dictionary mapping", "in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert variable", "= inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs)", "make_index, df) self.id = id self.entityset = entityset self.data =", "{}\".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id not in", ": Id of variable to convert. new_type (subclass of `Variable`)", ": If True, convert underlying data in the EntitySet. 
Raises:", "variable, then by index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t,", "def shape(self): '''Shape of the entity's dataframe''' return self.df.shape def", "v.type_string) shape = self.shape repr_out += u\"\\n Shape:\\n (Rows: {},", "is None and other.last_time_index is not None: return False elif", "set_time_index(self, variable_id, already_sorted=False): # check time type if not isinstance(self.df,", "not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df): return False", "is unique. \"\"\" if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False)", "time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check)", "+ [v for v in variables if v.id != index]", "if it represents more than # 25% of the values", "print summary of interesting values found. Returns: None \"\"\" for", "unique value there are; sort by count, # and add", "be strings (Column {} \" \"is not a string)\".format(c)) if", "Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset()", "# use stable sort if not already_sorted: # sort by", "at least try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs)", "skip = True break if skip: continue counts = self.df[variable.id].value_counts()", "(:class:`.Variable`) or type_strings (str) or (type, kwargs) to pass keyword", "vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self, **vtype[1]) else:", "other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index): return False if", ": EntitySet associated with this entity. convert_data (bool) : If", "keyword arguments to the Variable. 
index (str): Name of id", "add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find interesting values for categorical variables,", "order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable", "that the index is unique. \"\"\" if isinstance(self.df, pd.DataFrame): self.df", "variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if", "[v for v in variables if v.id != index] def", "5: make_index with no errors or warnings # (Case 4", "variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\" Create", "\" \"integer column\".format(index)) # Case 5: make_index with no errors", "{} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id for v", "variable_id, already_sorted=False): # check time type if not isinstance(self.df, pd.DataFrame)", "shape[1]) return repr_out @property def shape(self): '''Shape of the entity's", "for v in self.variables]] self.set_index(index) self.time_index = None if time_index:", "value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx]", "of values per variable to add. 
verbose (bool) : If", "deep: if self.last_time_index is None and other.last_time_index is not None:", "if index not in variable_types: variable_types[index] = vtypes.Index link_vars =", "__repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id) repr_out += u\" Variables:\" for", "v in inferred_variable_types: # TODO document how vtype can be", "+= [_v] # convert data once we've inferred self.df =", "the last event for each instance across all child entities.", ">>> es = make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data:", "of columns that depend on that secondary time \"\"\" variables", "index = df.columns[0] elif make_index and index in df.columns: #", "data for the entity. entityset (EntitySet): Entityset for this Entity.", "unique (bool) : Whether to assert that the index is", "pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique:", "msg += \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) #", "other.time_index: return False if self.secondary_time_index != other.secondary_time_index: return False if", "set(other.variables): return False if deep: if self.last_time_index is None and", "_datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an entity in a", ": if no variable exist with provided id \"\"\" for", "def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert variable in dataframe", "in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype, str): if vtype", "(str): Id of Entity. df (pd.DataFrame): Dataframe providing the data", "underlying data in the EntitySet. 
Raises: RuntimeError : Raises if", "in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index, make_index, df):", "isinstance(c, str): raise ValueError(\"All column names must be strings (Column", "convert_data=True, **kwargs): \"\"\"Convert variable in dataframe to different type Args:", ": Type of variable to convert to. entityset (:class:`.BaseEntitySet`) :", "\"integer column\".format(index)) # Case 5: make_index with no errors or", "other.last_time_index is None: return False elif self.last_time_index is not None", "= counts[idx] / total_count if fraction > 0.05 and fraction", "\"\"\" if convert_data: # first, convert the underlying data (or", "has been removed from variable types string_to_class_map[Text.type_string] = Text for", "df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index = index #", "Raises: RuntimeError : Raises if it cannot convert the underlying", "self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type =", "check if variable is not a list if not isinstance(variable_ids,", "return sum([value.__sizeof__() for value in self.data.values()]) @property def df(self): '''Dataframe", "and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5,", "(:class:`.Variable`) or type_string (str) or (type, kwargs) to pass keyword", "!= other.secondary_time_index: return False if len(self.variables) != len(other.variables): return False", "variable_id: return v raise KeyError(\"Variable: %s not found in entity\"", "not None and other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index):", "self.df.index.is_unique, \"Index is not unique on dataframe \" \\ \"(Entity", "associated with 
this entity. convert_data (bool) : If True, convert", "exist as a column in dataframe, and create a new", "check time type if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check", "def _get_variable(self, variable_id): \"\"\"Get variable instance Args: variable_id (str) :", "dataframe, optionaly making sure data is sorted, reference indexes to", "are consistent, and last_time_indexes are consistent. ''' if len(df.columns) !=", "def __repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id) repr_out += u\" Variables:\"", "import dask.dataframe as dd import numpy as np import pandas", "in a relational database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\"", "(bool) : Whether to assert that the index is unique.", "is analogous to a table in a relational database See", "or None): Name of time_index column secondary_time_index (dict[str: [str]]): Dictionary", "associated with. last_time_index (pd.Series): Time index of the last event", "user names index, it is not in df. does not", "None and other.last_time_index is None: return False elif self.last_time_index is", "far if len(counts.index) < 25: if verbose: msg = \"Variable", "= variable_id def set_index(self, variable_id, unique=True): \"\"\" Args: variable_id (string)", "self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id def set_index(self,", "on that secondary time \"\"\" variables = [] variable_types =", "entity's dataframe''' return self.df.shape def __eq__(self, other, deep=False): if self.index", "not specify # make_index. 
Make new index column and warn", "sorted, reference indexes to other entities are consistent, and last_time_indexes", "is not a list if not isinstance(variable_ids, list): raise TypeError('variable_ids", "@last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"] = lti def __hash__(self): return", "columns in secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty:", "inferred_variable_types.update(variable_types) for v in inferred_variable_types: # TODO document how vtype", "variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert variable in dataframe to different", "make_index is True\" # Case 2: make_index not specified but", "_dataframes_equal from featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas') logger", "df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose = verbose secondary_time_index", "contains {} columns, expecting {}\".format(len(df.columns), len(self.variables))) for v in self.variables:", "self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False): # check time type if", "column time_index (str or None): Name of time_index column secondary_time_index", "but column already exists raise RuntimeError(\"Cannot make index: index variable", "kwargs) to pass keyword arguments to the Variable. index (str):", "logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else:", ") for self_var, other_var in variables.values(): if not self_var.__eq__(other_var, deep=True):", "to get. Returns: :class:`.Variable` : Instance of variable. 
Raises: RuntimeError", "dataframe, creating new \" \"integer column\".format(index)) # Case 5: make_index", "of each unique value there are; sort by count, #", "is not unique on dataframe \" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id,", "# Make sure column ordering matches variable ordering self.df =", "index, it is not in df. does not specify #", "@property def last_time_index(self): ''' Time index of the last event", "that each map to a list of columns that depend", "index does not exist as a column in dataframe, and", "True, convert underlying data in the EntitySet. Raises: RuntimeError :", "Variables:\" for v in self.variables: repr_out += u\"\\n {} (dtype:", "for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self,", "instance across all child entities. ''' return self.data[\"last_time_index\"] @last_time_index.setter def", "False return True def __sizeof__(self): return sum([value.__sizeof__() for value in", "not isinstance(variable_ids, list): raise TypeError('variable_ids must be a list of", "variable exist with provided id \"\"\" for v in self.variables:", "from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\",", "convert_data (bool) : If True, convert underlying data in the", "column they are associated with. 
last_time_index (pd.Series): Time index of", "= vtypes.DatetimeTimeIndex # use stable sort if not already_sorted: #", "variable_types (dict[str -> types/str/dict[str -> type]]) : An entity's variable_types", "'DataFrame'): t = time_type # skip checking values already_sorted =", "variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def", "list): raise TypeError('variable_ids must be a list of variable names')", "_create_index(index, make_index, df): '''Handles index creation logic base on user", "column already exists raise RuntimeError(\"Cannot make index: index variable already", "dataframe, and create a new column of that name using", "self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id def set_index(self, variable_id,", "child entities. make_index (bool, optional) : If True, assume index", "get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for", "an entity in a Entityset, and stores relevant metadata and", "def set_index(self, variable_id, unique=True): \"\"\" Args: variable_id (string) : Name", "= df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index is", "def df(self, _df): self.data[\"df\"] = _df @property def last_time_index(self): '''", "variable_types dict maps string variable ids to types (:class:`.Variable`) or", "created_index = None if index is None: # Case 1:", "def last_time_index(self, lti): self.data[\"last_time_index\"] = lti def __hash__(self): return id(self.id)", "(string) : Name of an existing variable to set as", "value there are; sort by count, # and add interesting", "with provided id \"\"\" for v in self.variables: if v.id", "and last_time_indexes are consistent. 
''' if len(df.columns) != len(self.variables): raise", "Returns: :class:`.Variable` : Instance of variable. Raises: RuntimeError : if", "inputs''' assert isinstance(id, str), \"Entity id must be a string\"", "v raise KeyError(\"Variable: %s not found in entity\" % (variable_id))", "generate \"where\" clauses Args: max_values (int) : Maximum number of", "then by index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False)", "self.time_index != other.time_index: return False if self.secondary_time_index != other.secondary_time_index: return", "in other.variables: variables[variable] += (variable, ) for self_var, other_var in", "1 df[index] = df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'):", "the EntitySet. Raises: RuntimeError : Raises if it cannot convert", "index column time_index (str or None): Name of time_index column", "= [] variable_types = variable_types.copy() or {} string_to_class_map = find_variable_types()", "= self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id def", "basic 'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) #", "featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types ks", "self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self):", "if convert_data: # first, convert the underlying data (or at", "# Case 1: user wanted to make index but did", "parameter\")) index = df.columns[0] elif make_index and index in df.columns:", "v in self.variables]] self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index,", "variable types 
string_to_class_map[Text.type_string] = Text for vid in variable_types.copy(): vtype", "variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints", "\"Variable {}: Marking {} as an \" msg += \"interesting", "self.time_index = variable_id def set_index(self, variable_id, unique=True): \"\"\" Args: variable_id", "variable types''' return {v.id: type(v) for v in self.variables} def", "across all child entities. ''' return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self,", "col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use stable sort if not", "sum([value.__sizeof__() for value in self.data.values()]) @property def df(self): '''Dataframe providing", "self, **vtype[1]) else: _v = inferred_variable_types[v](v, self) variables += [_v]", "assert self.df.index.is_unique, \"Index is not unique on dataframe \" \\", "(dict[str -> types/str/dict[str -> type]]) : An entity's variable_types dict", "and other.last_time_index is None: return False elif self.last_time_index is not", "types (:class:`.Variable`) or type_string (str) or (type, kwargs) to pass", "of the values we have not seen so far if", "string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type {} was unrecognized,", "list if not isinstance(variable_ids, list): raise TypeError('variable_ids must be a", "keyword arguments to the Variable. 
index (str): Name of index", "< 0.95: if verbose: msg = \"Variable {}: Marking {}", "= verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index, time_index,", "time_index, secondary_time_index): \"\"\"Extracts the variables from a dataframe Args: variable_types", "names') if len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1)", "for variable in self.variables} for variable in other.variables: variables[variable] +=", "we have not seen so far if len(counts.index) < 25:", "return False if len(self.variables) != len(other.variables): return False if set(self.variables)", "df[[v.id for v in self.variables]] self.set_index(self.index) if self.time_index is not", "make_index: # Case 4: user names index, it is not", "id's to variable types''' return {v.id: type(v) for v in", "'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0]", "= _create_index(index, make_index, df) self.id = id self.entityset = entityset", "column of that name using integers the (0, len(dataframe)). Otherwise,", "= index # Case 6: user specified index, which is", "in the dataframe to the time index column they are", "= variable_types.copy() or {} string_to_class_map = find_variable_types() # TODO: Remove", "column_id=variable_id, new_type=new_type, **kwargs) # replace the old variable with the", "= df[[v.id for v in self.variables]] self.set_index(index) self.time_index = None", "if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else:", "return False elif self.last_time_index is not None and other.last_time_index is", "time variable, then by index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id,", "If True, print summary of interesting values found. 
Returns: None", "import numpy as np import pandas as pd from featuretools", "in self.variables]] self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted)", "v in self.variables: if v.id not in df.columns: raise ValueError(\"Updated", "Case 5: make_index with no errors or warnings # (Case", "is not None and other.last_time_index is None: return False elif", "return created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation checks", "from featuretools import variable_types as vtypes from featuretools.utils.entity_utils import (", "if v.id == variable_id: return v raise KeyError(\"Variable: %s not", "is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): \"\"\"", "specified index, which is already in df. No action needed.", "numpy as np import pandas as pd from featuretools import", "_create_index(index, make_index, df) self.id = id self.entityset = entityset self.data", "= string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type {} was", "variable to get. Returns: :class:`.Variable` : Instance of variable. 
Raises:", "ordering matches variable ordering self.df = df[[v.id for v in", "self.variables = [index_variable] + [v for v in variables if", "maps string variable ids to types (:class:`.Variable`) or type_string (str)", "to make index but column already exists raise RuntimeError(\"Cannot make", "values we have not seen so far if len(counts.index) <", "types/str/dict[str -> type]]) : An entity's variable_types dict maps string", "self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v)", "u\"Entity: {}\\n\".format(self.id) repr_out += u\" Variables:\" for v in self.variables:", "{})\".format( shape[0], shape[1]) return repr_out @property def shape(self): '''Shape of", "supplied, use first column warnings.warn((\"Using first column as index. \"", "convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance", "variable_id, unique=True): \"\"\" Args: variable_id (string) : Name of an", "self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and", "no variable exist with provided id \"\"\" for v in", "repr_out @property def shape(self): '''Shape of the entity's dataframe''' return", "KeyError(\"Variable: %s not found in entity\" % (variable_id)) @property def", "not found in entity\" % (variable_id)) @property def variable_types(self): '''Dictionary", "str): if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid]", "df (pd.DataFrame): Dataframe providing the data for the entity. 
entityset", "vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type", "{'df': df, 'last_time_index': last_time_index} self.created_index = created_index self._verbose = verbose", "return False return True def __sizeof__(self): return sum([value.__sizeof__() for value", "as numeric or\" \" datetime\" % (self.id)) if self.entityset.time_type !=", "= df.columns[0] elif make_index and index in df.columns: # Case", "`Variable`) : Type of variable to convert to. entityset (:class:`.BaseEntitySet`)", "to the Variable. index (str): Name of index column time_index", "def __hash__(self): return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def", "variable in dataframe to different type Args: variable_id (str) :", "id self.entityset = entityset self.data = {'df': df, 'last_time_index': last_time_index}", "and warn warnings.warn(\"index {} not found in dataframe, creating new", "analogous to a table in a relational database See Also:", "already in df. No action needed. return created_index, index, df", "= [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an entity", "= make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data: # first,", "type if not isinstance(self.df, pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype]", ": Maximum number of values per variable to add. 
verbose", "is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index,", "don't add interesting values for entities in relationships skip =", "_df): self.data[\"df\"] = _df @property def last_time_index(self): ''' Time index", "in relationships skip = False for r in self.entityset.relationships: if", "sure index is at the beginning index_variable = [v for", "self.last_time_index is not None and other.last_time_index is None: return False", "convert. new_type (subclass of `Variable`) : Type of variable to", "link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in inferred_variable_types: #", "so far if len(counts.index) < 25: if verbose: msg =", "\"\"\" # check if variable is not a list if", "of the entity's dataframe''' return self.df.shape def __eq__(self, other, deep=False):", "import variable_types as vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data,", "logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types", "__hash__(self): return id(self.id) def __getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self,", "the Variable. 
index (str): Name of index column time_index (str", "variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self,", "if no variable exist with provided id \"\"\" for v", "Raises if it cannot convert the underlying data Examples: >>>", "df = _create_index(index, make_index, df) self.id = id self.entityset =", "which differs from\" \" other entityset time indexes\" % (self.id,", "last_time_index (pd.Series): Time index of the last event for each", "with the new one, maintaining order variable = self._get_variable(variable_id) new_variable", "df[index] = 1 df[index] = df[index].cumsum() - 1 elif is_instance(df,", "% (self.id)) if self.entityset.time_type is None: self.entityset.time_type = time_type elif", "(bool, optional) : If True, assume index does not exist", "data for the entity.''' return self.data[\"df\"] @df.setter def df(self, _df):", "variable to set as index. unique (bool) : Whether to", "repr_out += u\"\\n Shape:\\n (Rows: {}, Columns: {})\".format( shape[0], shape[1])", "all child entities. 
''' return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti):", "variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO -", "index but column already exists raise RuntimeError(\"Cannot make index: index", "range(min(max_values, len(counts.index))): idx = counts.index[i] # add the value to", "def last_time_index(self): ''' Time index of the last event for", "other.variables: variables[variable] += (variable, ) for self_var, other_var in variables.values():", "Time index of the last event for each instance across", "values already_sorted = True # skip sorting else: t =", "== 0: return self.df = self.df.drop(variable_ids, axis=1) for v_id in", "to a table in a relational database See Also: :class:`.Relationship`,", "not None and other.last_time_index is None: return False elif self.last_time_index", "= counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx = counts.index[i]", "elif index not in df.columns: if not make_index: # Case", "self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description()", "time index not recognized as numeric or\" \" datetime\" %", "unique=True): \"\"\" Args: variable_id (string) : Name of an existing", "and fraction < 0.95: if verbose: msg = \"Variable {}:", "-> str]): Dictionary mapping columns in the dataframe to the", "make_index (bool, optional) : If True, assume index does not", "more than # 25% of the values we have not", "True break if skip: continue counts = self.df[variable.id].value_counts() # find", "table in a relational database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet`", "None: return False elif self.last_time_index is not None and other.last_time_index", "providing the data for 
the entity.''' return self.data[\"df\"] @df.setter def", "found. Returns: None \"\"\" for variable in self.variables: # some", "removing this constraints # don't add interesting values for entities", "= \"Variable {}: Marking {} as an \" msg +=", "variable_id def set_secondary_time_index(self, secondary_time_index): for time_index, columns in secondary_time_index.items(): if", "= get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types)", "{}\\n\".format(self.id) repr_out += u\" Variables:\" for v in self.variables: repr_out", "msg = \"Variable {}: Marking {} as an \" msg", "= new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index, time_index,", "counts[idx] / total_count if fraction > 0.05 and fraction <", "for this Entity. variable_types (dict[str -> type/str/dict[str -> type]]) :", "not make_index: # Case 4: user names index, it is", "False if self.time_index != other.time_index: return False if self.secondary_time_index !=", "each unique value there are; sort by count, # and", "v in variables if v.id == index][0] self.variables = [index_variable]", "= convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is at the", "is None: return False elif self.last_time_index is not None and", "time columns that each map to a list of columns", "of variable. 
Raises: RuntimeError : if no variable exist with", "each map to a list of columns that depend on", "len(df.columns) == len(set(df.columns)), \"Duplicate column names\" for c in df.columns:", "string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type", "secondary_time_index): \"\"\"Extracts the variables from a dataframe Args: variable_types (dict[str", ": Name of an existing variable to set as index.", "and index in df.columns: # Case 3: user wanted to", "is not in df. does not specify # make_index. Make", "self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\" Remove variables from entity's dataframe", "'''Handles index creation logic base on user input''' created_index =", "value in self.data.values()]) @property def df(self): '''Dataframe providing the data", "elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0,", "self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index)", "if set(self.variables) != set(other.variables): return False if deep: if self.last_time_index", "to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity. 
convert_data", "type_string (str) or (type, kwargs) to pass keyword arguments to", "(variable, ) for variable in self.variables} for variable in other.variables:", "for v in self.variables: if v.id not in df.columns: raise", "\"\"\" def __init__(self, id, df, entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None,", "maintaining order variable = self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] =", "Entity(object): \"\"\"Represents an entity in a Entityset, and stores relevant", "(Column {} \" \"is not a string)\".format(c)) if time_index is", "# make_index. Make new index column and warn warnings.warn(\"index {}", "in df.columns: if not isinstance(c, str): raise ValueError(\"All column names", "__eq__(self, other, deep=False): if self.index != other.index: return False if", "time index is %s type which differs from\" \" other", "index) else: df.insert(0, index, range(len(df))) created_index = index # Case", "df(self, _df): self.data[\"df\"] = _df @property def last_time_index(self): ''' Time", "Id of variable to get. Returns: :class:`.Variable` : Instance of", "new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index):", "by time variable, then by index self.df = self.df.sort_values([variable_id, self.index])", "if time_index not in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def", "is True\" # Case 2: make_index not specified but no", "if len(df.columns) != len(self.variables): raise ValueError(\"Updated dataframe contains {} columns,", "3: user wanted to make index but column already exists", "Remove variables from entity's dataframe and from self.variables Args: variable_ids", "Variable. 
index (str): Name of index column time_index (str or", "if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype]", "secondary time columns that each map to a list of", "RuntimeError(\"Cannot make index: index variable already present\") elif index not", "or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type", "_numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes class", "ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types", "self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def set_secondary_time_index(self, secondary_time_index): for", "if not make_index: # Case 4: user names index, it", "for value in self.data.values()]) @property def df(self): '''Dataframe providing the", "Name of id column in the dataframe. time_index (str): Name", "if time_index is not None and time_index not in df.columns:", ":class:`.Variable` : Instance of variable. Raises: RuntimeError : if no", "assert len(df.columns) == len(set(df.columns)), \"Duplicate column names\" for c in", "to types (:class:`.Variable`) or type_string (str) or (type, kwargs) to", "= None if unique: assert self.df.index.is_unique, \"Index is not unique", "as vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars,", "An Entity is analogous to a table in a relational", "arguments to the Variable. index (str): Name of index column", "across all child entities. 
make_index (bool, optional) : If True,", "__getitem__(self, variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get variable instance", "if self.entityset.time_type is None: self.entityset.time_type = time_type elif self.entityset.time_type !=", "how vtype can be tuple vtype = inferred_variable_types[v] if isinstance(vtype,", "the entity.''' return self.data[\"df\"] @df.setter def df(self, _df): self.data[\"df\"] =", "in self.variables: repr_out += u\"\\n {} (dtype: {})\".format(v.id, v.type_string) shape", "idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count", "dataframe Args: variable_types (dict[str -> types/str/dict[str -> type]]) : An", "variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's variable_types", "(str): Name of id column in the dataframe. time_index (str):", "Entity. variable_types (dict[str -> type/str/dict[str -> type]]) : An entity's", "== variable_id: return v raise KeyError(\"Variable: %s not found in", "= secondary_time_index def _create_index(index, make_index, df): '''Handles index creation logic", "for r in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip", "cannot convert the underlying data Examples: >>> from featuretools.tests.testing_utils import", "of index column time_index (str or None): Name of time_index", "time indexes\" % (self.id, time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'):", "An entity's variable_types dict maps string variable ids to types", "self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def", "or (type, kwargs) to pass keyword arguments to the Variable.", "providing the data for the entity. 
entityset (EntitySet): Entityset for", "index creation logic base on user input''' created_index = None", "{})\".format(v.id, v.type_string) shape = self.shape repr_out += u\"\\n Shape:\\n (Rows:", "or type_string (str) or (type, kwargs) to pass keyword arguments", "-= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\" Remove", "if variable is not a list if not isinstance(variable_ids, list):", "# TODO document how vtype can be tuple vtype =", "tuple): # vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v, self,", "(int) : Maximum number of values per variable to add.", "path) if isinstance(df, dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum()", "created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation checks for", "variable_types(self): '''Dictionary mapping variable id's to variable types''' return {v.id:", "TypeError(\"%s time index is %s type which differs from\" \"", "self_var, other_var in variables.values(): if not self_var.__eq__(other_var, deep=True): return False", "def _create_variables(self, variable_types, index, time_index, secondary_time_index): \"\"\"Extracts the variables from", "Text, find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types =", "= self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self,", "last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\" Create Entity Args: id (str):", "= vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use stable", "# total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids):", "already present\") elif index not in df.columns: if not make_index:", "if self.index != other.index: return 
False if self.time_index != other.time_index:", "False if len(self.variables) != len(other.variables): return False if set(self.variables) !=", "- consider removing this constraints # don't add interesting values", "# and add interesting values to each variable total_count =", "columns in the dataframe to the time index column they", "in dataframe. \"\"\" _validate_entity_params(id, df, time_index) created_index, index, df =", "(dict[str -> type/str/dict[str -> type]]) : An entity's variable_types dict", "not isinstance(c, str): raise ValueError(\"All column names must be strings", "TypeError(\"%s time index not recognized as numeric or\" \" datetime\"", "isinstance(df, dd.DataFrame): df[index] = 1 df[index] = df[index].cumsum() - 1", "(variable_id)) @property def variable_types(self): '''Dictionary mapping variable id's to variable", "in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id, already_sorted=False):", "True # skip sorting else: t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]):", "variable_types=inferred_variable_types) # make sure index is at the beginning index_variable", "(variable, ) for self_var, other_var in variables.values(): if not self_var.__eq__(other_var,", "specify column name assert not make_index, \"Must specify an index", "but no index supplied, use first column warnings.warn((\"Using first column", "verbose: msg = \"Variable {}: Marking {} as an \"", "return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"] = lti def", "if not _dataframes_equal(self.df, other.df): return False variables = {variable: (variable,", "= u\"Entity: {}\\n\".format(self.id) repr_out += u\" Variables:\" for v in", "entity's internal dataframe, optionaly making sure data is sorted, reference", "True\" # Case 2: make_index not specified but no index", "else: time_to_check = self.df[time_index].head(1).iloc[0] time_type = 
_check_time_type(time_to_check) if time_type is", "\"\"\"Extracts the variables from a dataframe Args: variable_types (dict[str ->", "not recognized as numeric or\" \" datetime\" % (self.id)) if", "__init__(self, id, df, entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False,", "Args: variable_id (str) : Id of variable to get. Returns:", "entity. convert_data (bool) : If True, convert underlying data in", "if verbose: msg = \"Variable {}: Marking {} as an", "interesting values for categorical variables, to be used to generate", "database See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self, id,", "_create_variables(self, variable_types, index, time_index, secondary_time_index): \"\"\"Extracts the variables from a", "in self.variables]] self.set_index(self.index) if self.time_index is not None: self.set_time_index(self.time_index, already_sorted=already_sorted)", "= _check_time_type(time_to_check) if time_type is None: raise TypeError(\"%s time index", "self.entityset = entityset self.data = {'df': df, 'last_time_index': last_time_index} self.created_index", "self.variables: repr_out += u\"\\n {} (dtype: {})\".format(v.id, v.type_string) shape =", "vtypes.DatetimeTimeIndex # use stable sort if not already_sorted: # sort", "'''Validation checks for Entity inputs''' assert isinstance(id, str), \"Entity id", "index. unique (bool) : Whether to assert that the index", "0.05 and fraction < 0.95: if verbose: msg = \"Variable", "or {} string_to_class_map = find_variable_types() # TODO: Remove once Text", "not None and time_index not in df.columns: raise LookupError('Time index", "the dataframe. time_index (str): Name of time column in the", "data is sorted, reference indexes to other entities are consistent,", "raise TypeError(\"%s time index is %s type which differs from\"", "in df. does not specify # make_index. 
Make new index", "import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils", ": Id of variable to get. Returns: :class:`.Variable` : Instance", "the dataframe. secondary_time_index (dict[str -> str]): Dictionary mapping columns in", "convert data once we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) #", "indexes to other entities are consistent, and last_time_indexes are consistent.", "self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find interesting values", "in self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip = True", "name assert not make_index, \"Must specify an index name if", "sort by count, # and add interesting values to each", "self.created_index = created_index self._verbose = verbose secondary_time_index = secondary_time_index or", "variable ids to types (:class:`.Variable`) or type_strings (str) or (type,", "recognized as numeric or\" \" datetime\" % (self.id)) if self.entityset.time_type", "to add. verbose (bool) : If True, print summary of", "first column warnings.warn((\"Using first column as index. 
\" \"To change", "convert_data=False) self.time_index = variable_id def set_index(self, variable_id, unique=True): \"\"\" Args:", "idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -= counts[idx] else: break", "index self.df = self.df.sort_values([variable_id, self.index]) self.convert_variable_type(variable_id, t, convert_data=False) self.time_index =", "from self.variables Args: variable_ids (list[str]): Variables to delete Returns: None", "update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making", "entity's dataframe and from self.variables Args: variable_ids (list[str]): Variables to", "!= time_type: raise TypeError(\"%s time index is %s type which", ": An entity's variable_types dict maps string variable ids to", "to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this", "(str): Name of index column time_index (str or None): Name", "= new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index): \"\"\"Extracts the", "self.data[\"last_time_index\"] = lti def __hash__(self): return id(self.id) def __getitem__(self, variable_id):", "each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for i", "to set as index. unique (bool) : Whether to assert", "from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types )", "for the entity. entityset (EntitySet): Entityset for this Entity. 
variable_types", "es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data: # first, convert the underlying", "'''Dataframe providing the data for the entity.''' return self.data[\"df\"] @df.setter", "TypeError('variable_ids must be a list of variable names') if len(variable_ids)", "is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else:", "entities. ''' return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"] =", "if make_index is True\" # Case 2: make_index not specified", "warnings # (Case 4 also uses this code path) if", "change this, specify the index parameter\")) index = df.columns[0] elif", "return False variables = {variable: (variable, ) for variable in", "be used to generate \"where\" clauses Args: max_values (int) :", "variable in self.variables: # some heuristics to find basic 'where'-able", "convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity.", "not None: return False elif self.last_time_index is not None and", "_dataframes_equal(self.df, other.df): return False variables = {variable: (variable, ) for", "index, time_index, secondary_time_index) self.df = df[[v.id for v in self.variables]]", "df def _validate_entity_params(id, df, time_index): '''Validation checks for Entity inputs'''", "\"\"\" Remove variables from entity's dataframe and from self.variables Args:", "df.columns: # Case 3: user wanted to make index but", "vtype = variable_types[vid] if isinstance(vtype, str): if vtype in string_to_class_map:", "values to each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1]", "or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id for", "column warnings.warn((\"Using first column as index. 
\" \"To change this,", "warnings.warn(\"Variable type {} was unrecognized, Unknown variable type was used", "\"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction =", "variable to add. verbose (bool) : If True, print summary", "string)\".format(c)) if time_index is not None and time_index not in", "new {} column\".format(v.id)) # Make sure column ordering matches variable", "np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))): idx", "id must be a string\" assert len(df.columns) == len(set(df.columns)), \"Duplicate", "'''Update entity's internal dataframe, optionaly making sure data is sorted,", "EntitySet. Raises: RuntimeError : Raises if it cannot convert the", "index, df = _create_index(index, make_index, df) self.id = id self.entityset", "False elif self.last_time_index is not None and other.last_time_index is not", "unique. \"\"\" if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name", "specify an index name if make_index is True\" # Case", "True, print summary of interesting values found. Returns: None \"\"\"", "and create a new column of that name using integers", "elif make_index and index in df.columns: # Case 3: user", "total_count if fraction > 0.05 and fraction < 0.95: if", "that name using integers the (0, len(dataframe)). 
Otherwise, assume index", "None \"\"\" # check if variable is not a list", "to variable types''' return {v.id: type(v) for v in self.variables}", "max_values=5, verbose=False): \"\"\" Find interesting values for categorical variables, to", "shape(self): '''Shape of the entity's dataframe''' return self.df.shape def __eq__(self,", "string\" assert len(df.columns) == len(set(df.columns)), \"Duplicate column names\" for c", "(list[str]): Variables to delete Returns: None \"\"\" # check if", "verbose=False): \"\"\" Create Entity Args: id (str): Id of Entity.", "str]): Dictionary mapping columns in the dataframe to the time", "raise KeyError(\"Variable: %s not found in entity\" % (variable_id)) @property", "old variable with the new one, maintaining order variable =", "if fraction > 0.05 and fraction < 0.95: if verbose:", "# add the value to interesting_values if it represents more", "optionaly making sure data is sorted, reference indexes to other", "other entityset time indexes\" % (self.id, time_type)) if is_instance(self.df, (dd,", "ValueError(\"Updated dataframe is missing new {} column\".format(v.id)) # Make sure", "convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance from", "False elif self.last_time_index is not None and other.last_time_index is None:", "[r.child_variable, r.parent_variable]: skip = True break if skip: continue counts", "entityset time indexes\" % (self.id, time_type)) if is_instance(self.df, (dd, ks),", "See Also: :class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self, id, df,", "not in variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types", "v.id not in df.columns: raise ValueError(\"Updated dataframe is missing new", "v.id == index][0] self.variables = [index_variable] + [v for v", "categorical variables, to be used to generate \"where\" clauses Args:", "es = 
make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data: #", "df.columns: raise ValueError(\"Updated dataframe is missing new {} column\".format(v.id)) #", "index is unique. \"\"\" if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id],", "v in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert", "set(self.variables) != set(other.variables): return False if deep: if self.last_time_index is", "index in df.columns: # Case 3: user wanted to make", "entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None, already_sorted=False, make_index=False, verbose=False): \"\"\"", "to a list of columns that depend on that secondary", "dataframe. secondary_time_index (dict[str -> str]): Dictionary mapping columns in the", "variables = [] variable_types = variable_types.copy() or {} string_to_class_map =", "vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use stable sort", "return v raise KeyError(\"Variable: %s not found in entity\" %", "infer_variable_types ) from featuretools.utils.gen_utils import import_or_none, is_instance from featuretools.utils.wrangle import", "index not recognized as numeric or\" \" datetime\" % (self.id))", "is None: raise TypeError(\"%s time index not recognized as numeric", "interesting values for entities in relationships skip = False for", "1 elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else:", "exist with provided id \"\"\" for v in self.variables: if", "self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False): \"\"\" Find interesting values for", "repr_out += u\"\\n {} (dtype: {})\".format(v.id, v.type_string) shape = self.shape", "{})\".format(self.id) 
self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def set_secondary_time_index(self, secondary_time_index):", "index not in variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self)", "Name of an existing variable to set as index. unique", "self.data[\"df\"] = _df @property def last_time_index(self): ''' Time index of", "the old variable with the new one, maintaining order variable", "entities are consistent, and last_time_indexes are consistent. ''' if len(df.columns)", "= vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if", "vtype = inferred_variable_types[v] if isinstance(vtype, tuple): # vtype is (ft.Variable,", ": If True, print summary of interesting values found. Returns:", "\"\"\"Get variable instance Args: variable_id (str) : Id of variable", "_validate_entity_params(id, df, time_index) created_index, index, df = _create_index(index, make_index, df)", "missing new {} column\".format(v.id)) # Make sure column ordering matches", "in inferred_variable_types: # TODO document how vtype can be tuple", "25: if verbose: msg = \"Variable {}: Marking {} as", "ordering self.df = df[[v.id for v in self.variables]] self.set_index(self.index) if", "variable is not a list if not isinstance(variable_ids, list): raise", "this Entity. 
variable_types (dict[str -> type/str/dict[str -> type]]) : An", "self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique: assert self.df.index.is_unique, \"Index", "Variables to delete Returns: None \"\"\" # check if variable", "self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise TypeError(\"%s", "index column and warn warnings.warn(\"index {} not found in dataframe,", "was unrecognized, Unknown variable type was used instead\".format(vtype)) if index", "# 25% of the values we have not seen so", "1: user wanted to make index but did not specify", "variables if v.id == index][0] self.variables = [index_variable] + [v", "# Case 3: user wanted to make index but column", "''' return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"] = lti", "= np.sum(counts) counts[:] = counts.sort_values()[::-1] for i in range(min(max_values, len(counts.index))):", "last_time_index(self): ''' Time index of the last event for each", "< 25: if verbose: msg = \"Variable {}: Marking {}", "convert the underlying data (or at least try to) self.df", "for v in self.variables: if v.id == variable_id: return v", "= infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index) inferred_variable_types.update(variable_types) for v in", "self._verbose = verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index,", "variable_types.copy() or {} string_to_class_map = find_variable_types() # TODO: Remove once", "secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id", "assert not make_index, \"Must specify an index name if make_index", "Entityset for this Entity. 
variable_types (dict[str -> type/str/dict[str -> type]])", "not seen so far if len(counts.index) < 25: if verbose:", "Text for vid in variable_types.copy(): vtype = variable_types[vid] if isinstance(vtype,", "map to a list of columns that depend on that", "values found. Returns: None \"\"\" for variable in self.variables: #", "mapping variable id's to variable types''' return {v.id: type(v) for", "= import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types =", "(dtype: {})\".format(v.id, v.type_string) shape = self.shape repr_out += u\"\\n Shape:\\n", "v.id == variable_id: return v raise KeyError(\"Variable: %s not found", "% (variable_id)) @property def variable_types(self): '''Dictionary mapping variable id's to", "# first, convert the underlying data (or at least try", "@property def shape(self): '''Shape of the entity's dataframe''' return self.df.shape", "checks for Entity inputs''' assert isinstance(id, str), \"Entity id must", "set_index(self, variable_id, unique=True): \"\"\" Args: variable_id (string) : Name of", "column in the dataframe. 
time_index (str): Name of time column", "If True, assume index does not exist as a column", "def df(self): '''Dataframe providing the data for the entity.''' return", "variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable type {} was unrecognized, Unknown variable", "self.variables: if v.id not in df.columns: raise ValueError(\"Updated dataframe is", "featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset')", "self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert variable in", "str): raise ValueError(\"All column names must be strings (Column {}", "that depend on that secondary time \"\"\" variables = []", "Case 6: user specified index, which is already in df.", "sorting else: t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex", "clauses Args: max_values (int) : Maximum number of values per", "if not already_sorted: # sort by time variable, then by", "self.convert_variable_type(variable_id, t, convert_data=False) self.time_index = variable_id def set_index(self, variable_id, unique=True):", "in self.variables: if v.id == variable_id: return v raise KeyError(\"Variable:", "# Case 6: user specified index, which is already in", "event for each instance across all child entities. 
make_index (bool,", "make sure index is at the beginning index_variable = [v", "not specified but no index supplied, use first column warnings.warn((\"Using", "not a list if not isinstance(variable_ids, list): raise TypeError('variable_ids must", "= True # skip sorting else: t = vtypes.NumericTimeIndex if", "time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'): t = time_type #", "= inferred_variable_types[v](v, self) variables += [_v] # convert data once", "self.entityset.relationships: if variable in [r.child_variable, r.parent_variable]: skip = True break", "variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars,", "index of the last event for each instance across all", ": Raises if it cannot convert the underlying data Examples:", "df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe, optionaly making sure", "Find interesting values for categorical variables, to be used to", "isinstance(vtype, str): if vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else:", "\" \"To change this, specify the index parameter\")) index =", "first column as index. \" \"To change this, specify the", "raise TypeError(\"%s time index not recognized as numeric or\" \"", "self.df.shape def __eq__(self, other, deep=False): if self.index != other.index: return", "index is at the beginning index_variable = [v for v", "from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types", "entities in relationships skip = False for r in self.entityset.relationships:", "(str) : Id of variable to get. 
Returns: :class:`.Variable` :", "entityset self.data = {'df': df, 'last_time_index': last_time_index} self.created_index = created_index", "once Text has been removed from variable types string_to_class_map[Text.type_string] =", "not unique on dataframe \" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index,", "or type_strings (str) or (type, kwargs) to pass keyword arguments", "metadata and data An Entity is analogous to a table", "for variable in self.variables: # some heuristics to find basic", "column in dataframe, and create a new column of that", "None if index is None: # Case 1: user wanted", "def set_time_index(self, variable_id, already_sorted=False): # check time type if not", "str), \"Entity id must be a string\" assert len(df.columns) ==", "provided id \"\"\" for v in self.variables: if v.id ==", "expecting {}\".format(len(df.columns), len(self.variables))) for v in self.variables: if v.id not", "\"\"\"Convert variable in dataframe to different type Args: variable_id (str)", "the index is unique. 
\"\"\" if isinstance(self.df, pd.DataFrame): self.df =", "\" other entityset time indexes\" % (self.id, time_type)) if time_index", "self._create_variables(variable_types, index, time_index, secondary_time_index) self.df = df[[v.id for v in", ": If True, assume index does not exist as a", "vtype in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown']", "time_index not in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index,", "entity's variable_types dict maps string variable ids to types (:class:`.Variable`)", "(self.id, time_type)) if is_instance(self.df, (dd, ks), 'DataFrame'): t = time_type", "\"\"\" variables = [] variable_types = variable_types.copy() or {} string_to_class_map", "convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the old variable with", "variable id's to variable types''' return {v.id: type(v) for v", "to make index but did not specify column name assert", "as index. unique (bool) : Whether to assert that the", "Entity. 
df (pd.DataFrame): Dataframe providing the data for the entity.", "r.parent_variable]: skip = True break if skip: continue counts =", "variable instance Args: variable_id (str) : Id of variable to", "= vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v, self) variables", "index variable already present\") elif index not in df.columns: if", "data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es =", "new column of that name using integers the (0, len(dataframe)).", "+= u\"\\n {} (dtype: {})\".format(v.id, v.type_string) shape = self.shape repr_out", "if v.id not in df.columns: raise ValueError(\"Updated dataframe is missing", "\"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) # total_count -=", "class Entity(object): \"\"\"Represents an entity in a Entityset, and stores", "t = time_type # skip checking values already_sorted = True", "self.df.index.name = None if unique: assert self.df.index.is_unique, \"Index is not", "it is not in df. does not specify # make_index.", "logging import warnings import dask.dataframe as dd import numpy as", "already_sorted = True # skip sorting else: t = vtypes.NumericTimeIndex", "if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider", "Otherwise, assume index exists in dataframe. 
\"\"\" _validate_entity_params(id, df, time_index)", "len(set(df.columns)), \"Duplicate column names\" for c in df.columns: if not", "present\") elif index not in df.columns: if not make_index: #", "df[index] = df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'): df", "shape = self.shape repr_out += u\"\\n Shape:\\n (Rows: {}, Columns:", "index, df def _validate_entity_params(id, df, time_index): '''Validation checks for Entity", "for self_var, other_var in variables.values(): if not self_var.__eq__(other_var, deep=True): return", "# Case 2: make_index not specified but no index supplied,", "dataframe. \"\"\" _validate_entity_params(id, df, time_index) created_index, index, df = _create_index(index,", "exists raise RuntimeError(\"Cannot make index: index variable already present\") elif", "verbose=False): \"\"\" Find interesting values for categorical variables, to be", "featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from", "are consistent. ''' if len(df.columns) != len(self.variables): raise ValueError(\"Updated dataframe", "(bool) : If True, convert underlying data in the EntitySet.", "ValueError(\"Updated dataframe contains {} columns, expecting {}\".format(len(df.columns), len(self.variables))) for v", "Case 4: user names index, it is not in df.", "Args: id (str): Id of Entity. df (pd.DataFrame): Dataframe providing", "len(counts.index))): idx = counts.index[i] # add the value to interesting_values", "in dataframe to different type Args: variable_id (str) : Id", "= convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) # replace the old variable", "variables += [_v] # convert data once we've inferred self.df", "to convert. 
new_type (subclass of `Variable`) : Type of variable", "return self.df = self.df.drop(variable_ids, axis=1) for v_id in variable_ids: v", "convert the underlying data Examples: >>> from featuretools.tests.testing_utils import make_ecommerce_entityset", "df.insert(0, index, range(len(df))) created_index = index # Case 6: user", "consistent. ''' if len(df.columns) != len(self.variables): raise ValueError(\"Updated dataframe contains", "df.columns: if not make_index: # Case 4: user names index,", "if col_is_datetime(self.df[variable_id]): t = vtypes.DatetimeTimeIndex # use stable sort if", "{} columns, expecting {}\".format(len(df.columns), len(self.variables))) for v in self.variables: if", "_v = vtype[0](v, self, **vtype[1]) else: _v = inferred_variable_types[v](v, self)", "all child entities. make_index (bool, optional) : If True, assume", "return False if not _dataframes_equal(self.df, other.df): return False variables =", "variable to convert to. entityset (:class:`.BaseEntitySet`) : EntitySet associated with", "self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self, max_values=5, verbose=False):", "None: raise TypeError(\"%s time index not recognized as numeric or\"", "logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types = vtypes.PandasTypes._pandas_datetimes", "existing variable to set as index. unique (bool) : Whether", "if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u\"Entity:", "first, convert the underlying data (or at least try to)", "make_index and index in df.columns: # Case 3: user wanted", "entities. 
make_index (bool, optional) : If True, assume index does", "featuretools import variable_types as vtypes from featuretools.utils.entity_utils import ( col_is_datetime,", "self.df = df[[v.id for v in self.variables]] self.set_index(index) self.time_index =", "event for each instance across all child entities. ''' return", "columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index, make_index, df): '''Handles", "did not specify column name assert not make_index, \"Must specify", "differs from\" \" other entityset time indexes\" % (self.id, time_type))", "'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index", "Case 2: make_index not specified but no index supplied, use", "not in columns: columns.append(time_index) self.secondary_time_index = secondary_time_index def _create_index(index, make_index,", "make_index=False, verbose=False): \"\"\" Create Entity Args: id (str): Id of", "= time_type # skip checking values already_sorted = True #", "make_index, df): '''Handles index creation logic base on user input'''", "else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is", "for i in range(min(max_values, len(counts.index))): idx = counts.index[i] # add", "set_secondary_time_index(self, secondary_time_index): for time_index, columns in secondary_time_index.items(): if is_instance(self.df, (dd,", "import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical]", "self.entityset.time_type is None: self.entityset.time_type = time_type elif self.entityset.time_type != time_type:", "datetime\" % (self.id)) if self.entityset.time_type is None: self.entityset.time_type = time_type", "featuretools.utils.gen_utils import import_or_none, is_instance from 
featuretools.utils.wrangle import _check_time_type, _dataframes_equal from", "False if set(self.variables) != set(other.variables): return False if deep: if", "if not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df): return", "EntitySet associated with this entity. convert_data (bool) : If True,", "logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] /", "in dataframe, and create a new column of that name", "u\"\\n Shape:\\n (Rows: {}, Columns: {})\".format( shape[0], shape[1]) return repr_out", "a column in dataframe, and create a new column of", "entityset time indexes\" % (self.id, time_type)) if time_index not in", "variable ids to types (:class:`.Variable`) or type_string (str) or (type,", "'''Shape of the entity's dataframe''' return self.df.shape def __eq__(self, other,", "Dictionary mapping columns in the dataframe to the time index", "Case 1: user wanted to make index but did not", ":class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self, id, df, entityset, variable_types=None, index=None,", "import_or_none, is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import", "index is None: # Case 1: user wanted to make", "range(len(df))) created_index = index # Case 6: user specified index,", "self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type =", "''' if len(df.columns) != len(self.variables): raise ValueError(\"Updated dataframe contains {}", "index but did not specify column name assert not make_index,", "4 also uses this code path) if isinstance(df, dd.DataFrame): df[index]", "None and other.last_time_index is not None: if not self.last_time_index.equals(other.last_time_index): return", "of that name using 
integers the (0, len(dataframe)). Otherwise, assume", "string variable ids to types (:class:`.Variable`) or type_string (str) or", "consider removing this constraints # don't add interesting values for", "t, convert_data=False) self.time_index = variable_id def set_index(self, variable_id, unique=True): \"\"\"", "make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if convert_data: # first, convert", "variable_types: variable_types[index] = vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df,", "c in df.columns: if not isinstance(c, str): raise ValueError(\"All column", "dataframe. time_index (str): Name of time column in the dataframe.", "continue counts = self.df[variable.id].value_counts() # find how many of each", "a dataframe Args: variable_types (dict[str -> types/str/dict[str -> type]]) :", "raise ValueError(\"Updated dataframe contains {} columns, expecting {}\".format(len(df.columns), len(self.variables))) for", "the new one, maintaining order variable = self._get_variable(variable_id) new_variable =", "for variable in other.variables: variables[variable] += (variable, ) for self_var,", "convert_variable_type(self, variable_id, new_type, convert_data=True, **kwargs): \"\"\"Convert variable in dataframe to", "is not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index", "Args: variable_ids (list[str]): Variables to delete Returns: None \"\"\" #", "link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types, time_index, secondary_time_index)", "in self.variables: # some heuristics to find basic 'where'-able variables", "input''' created_index = None if index is None: # Case", "return True def __sizeof__(self): return sum([value.__sizeof__() for value in 
self.data.values()])", "of interesting values found. Returns: None \"\"\" for variable in", "for v in variables if v.id != index] def update_data(self,", "if skip: continue counts = self.df[variable.id].value_counts() # find how many", "# make sure index is at the beginning index_variable =", "self_var.__eq__(other_var, deep=True): return False return True def __sizeof__(self): return sum([value.__sizeof__()", "index (str): Name of index column time_index (str or None):", "least try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type, **kwargs) #", "to the time index column they are associated with. last_time_index", "\"\"\" Find interesting values for categorical variables, to be used", "_df @property def last_time_index(self): ''' Time index of the last", "of variable names') if len(variable_ids) == 0: return self.df =", "pass keyword arguments to the Variable. index (str): Name of", "\" datetime\" % (self.id)) if self.entityset.time_type != time_type: raise TypeError(\"%s", "make index: index variable already present\") elif index not in", "variable_types = variable_types.copy() or {} string_to_class_map = find_variable_types() # TODO:", "summary of interesting values found. 
Returns: None \"\"\" for variable", "# some heuristics to find basic 'where'-able variables if isinstance(variable,", "we've inferred self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index", "in self.variables} for variable in other.variables: variables[variable] += (variable, )", "assert isinstance(id, str), \"Entity id must be a string\" assert", "document how vtype can be tuple vtype = inferred_variable_types[v] if", "in string_to_class_map: variable_types[vid] = string_to_class_map[vtype] else: variable_types[vid] = string_to_class_map['unknown'] warnings.warn(\"Variable", "depend on that secondary time \"\"\" variables = [] variable_types", "None: if not self.last_time_index.equals(other.last_time_index): return False if not _dataframes_equal(self.df, other.df):", "len(dataframe)). Otherwise, assume index exists in dataframe. \"\"\" _validate_entity_params(id, df,", "\\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index = variable_id def", "numeric or\" \" datetime\" % (self.id)) if self.entityset.time_type is None:", "self.variables]] self.set_index(index) self.time_index = None if time_index: self.set_time_index(time_index, already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index)", "interesting values to each variable total_count = np.sum(counts) counts[:] =", "[v for v in variables if v.id == index][0] self.variables", "v in self.variables: repr_out += u\"\\n {} (dtype: {})\".format(v.id, v.type_string)", "user wanted to make index but did not specify column", ">>> from featuretools.tests.testing_utils import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>>", "= vtypes.PandasTypes._pandas_datetimes class Entity(object): \"\"\"Represents an entity in a Entityset,", "= lti def __hash__(self): return id(self.id) def __getitem__(self, variable_id): return", "time indexes\" % (self.id, time_type)) if 
time_index not in columns:", "_v = inferred_variable_types[v](v, self) variables += [_v] # convert data", "new_type, convert_data=True, **kwargs): \"\"\"Convert variable in dataframe to different type", "fraction < 0.95: if verbose: msg = \"Variable {}: Marking", "No action needed. return created_index, index, df def _validate_entity_params(id, df,", "child entities. ''' return self.data[\"last_time_index\"] @last_time_index.setter def last_time_index(self, lti): self.data[\"last_time_index\"]", "total_count -= counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\"", "values per variable to add. verbose (bool) : If True,", "maps string variable ids to types (:class:`.Variable`) or type_strings (str)", "'last_time_index': last_time_index} self.created_index = created_index self._verbose = verbose secondary_time_index =", "(bool) : If True, print summary of interesting values found.", "a string)\".format(c)) if time_index is not None and time_index not", "the dataframe to the time index column they are associated", "else: fraction = counts[idx] / total_count if fraction > 0.05", "\"\"\"Represents an entity in a Entityset, and stores relevant metadata", "= vtypes.Index link_vars = get_linked_vars(self) inferred_variable_types = infer_variable_types(self.df, link_vars, variable_types,", "secondary_time_index def _create_index(index, make_index, df): '''Handles index creation logic base", "pandas as pd from featuretools import variable_types as vtypes from", "counts[idx] else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\" Remove variables", "other.secondary_time_index: return False if len(self.variables) != len(other.variables): return False if", "(self.id, time_type)) if time_index not in columns: columns.append(time_index) self.secondary_time_index =", "the data for the entity.''' return self.data[\"df\"] @df.setter def df(self,", "must be a string\" assert 
len(df.columns) == len(set(df.columns)), \"Duplicate column", "raise TypeError('variable_ids must be a list of variable names') if", "name if make_index is True\" # Case 2: make_index not", "df, time_index) created_index, index, df = _create_index(index, make_index, df) self.id", "the index parameter\")) index = df.columns[0] elif make_index and index", "creation logic base on user input''' created_index = None if", "is_instance from featuretools.utils.wrangle import _check_time_type, _dataframes_equal from featuretools.variable_types import Text,", "import pandas as pd from featuretools import variable_types as vtypes", "= logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics _categorical_types = [vtypes.PandasTypes._categorical] _datetime_types =", "recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def add_interesting_values(self,", "def set_secondary_time_index(self, secondary_time_index): for time_index, columns in secondary_time_index.items(): if is_instance(self.df,", "self.shape repr_out += u\"\\n Shape:\\n (Rows: {}, Columns: {})\".format( shape[0],", "created_index self._verbose = verbose secondary_time_index = secondary_time_index or {} self._create_variables(variable_types,", "index is %s type which differs from\" \" other entityset", "if deep: if self.last_time_index is None and other.last_time_index is not", "last event for each instance across all child entities. make_index", "= entityset self.data = {'df': df, 'last_time_index': last_time_index} self.created_index =", "variable.interesting_values.append(pd.Series([idx])) else: fraction = counts[idx] / total_count if fraction >", "user input''' created_index = None if index is None: #", "df. does not specify # make_index. 
Make new index column", "already_sorted=False): # check time type if not isinstance(self.df, pd.DataFrame) or", "to each variable total_count = np.sum(counts) counts[:] = counts.sort_values()[::-1] for", "column names\" for c in df.columns: if not isinstance(c, str):", "entityset (:class:`.BaseEntitySet`) : EntitySet associated with this entity. convert_data (bool)", "\" \"is not a string)\".format(c)) if time_index is not None", "variable_id): return self._get_variable(variable_id) def _get_variable(self, variable_id): \"\"\"Get variable instance Args:", "/ total_count if fraction > 0.05 and fraction < 0.95:", "def delete_variables(self, variable_ids): \"\"\" Remove variables from entity's dataframe and", "Instance of variable. Raises: RuntimeError : if no variable exist", "variables.values(): if not self_var.__eq__(other_var, deep=True): return False return True def", "in [r.child_variable, r.parent_variable]: skip = True break if skip: continue", "% (self.id, time_type)) if time_index not in columns: columns.append(time_index) self.secondary_time_index", "convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is at the beginning", "fraction = counts[idx] / total_count if fraction > 0.05 and", "Args: max_values (int) : Maximum number of values per variable", "= self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check) if time_type is None: raise", "isinstance(vtype, tuple): # vtype is (ft.Variable, dict_of_kwargs) _v = vtype[0](v,", "must be strings (Column {} \" \"is not a string)\".format(c))", "_check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types ks = import_or_none('databricks.koalas')", "return False if set(self.variables) != set(other.variables): return False if deep:", "warnings import dask.dataframe as dd import numpy as np import", "names must be strings (Column {} \" \"is not a", "not make_index, \"Must specify an index name if make_index is", "!= 
other.index: return False if self.time_index != other.time_index: return False", "entity.''' return self.data[\"df\"] @df.setter def df(self, _df): self.data[\"df\"] = _df", "entity\" % (variable_id)) @property def variable_types(self): '''Dictionary mapping variable id's", "= pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing this constraints #", "msg += \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else:", "variable_id def set_index(self, variable_id, unique=True): \"\"\" Args: variable_id (string) :", "find_variable_types ks = import_or_none('databricks.koalas') logger = logging.getLogger('featuretools.entityset') _numeric_types = vtypes.PandasTypes._pandas_numerics", "\"where\" clauses Args: max_values (int) : Maximum number of values", "= vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check = self.df[time_index].head(1).iloc[0] time_type = _check_time_type(time_to_check) if", "def variable_types(self): '''Dictionary mapping variable id's to variable types''' return", "( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types ) from featuretools.utils.gen_utils import", "elif self.last_time_index is not None and other.last_time_index is not None:", "# don't add interesting values for entities in relationships skip", "v in self.variables: if v.id == variable_id: return v raise", "4: user names index, it is not in df. 
does", "variable with the new one, maintaining order variable = self._get_variable(variable_id)", "variable_types as vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data,", "= id self.entityset = entityset self.data = {'df': df, 'last_time_index':", "\"is not a string)\".format(c)) if time_index is not None and", "True, assume index does not exist as a column in", "Name of index column time_index (str or None): Name of", "logic base on user input''' created_index = None if index", "__sizeof__(self): return sum([value.__sizeof__() for value in self.data.values()]) @property def df(self):", "removed from variable types string_to_class_map[Text.type_string] = Text for vid in", "to different type Args: variable_id (str) : Id of variable", "index name if make_index is True\" # Case 2: make_index", "data An Entity is analogous to a table in a", "skip checking values already_sorted = True # skip sorting else:", "[] variable_types = variable_types.copy() or {} string_to_class_map = find_variable_types() #", "len(other.variables): return False if set(self.variables) != set(other.variables): return False if", "variables, to be used to generate \"where\" clauses Args: max_values", "import _check_time_type, _dataframes_equal from featuretools.variable_types import Text, find_variable_types ks =", "entityset (EntitySet): Entityset for this Entity. 
variable_types (dict[str -> type/str/dict[str", "(or at least try to) self.df = convert_variable_data(df=self.df, column_id=variable_id, new_type=new_type,", "types string_to_class_map[Text.type_string] = Text for vid in variable_types.copy(): vtype =", "Entity is analogous to a table in a relational database", "column secondary_time_index (dict[str: [str]]): Dictionary of secondary time columns that", "be a string\" assert len(df.columns) == len(set(df.columns)), \"Duplicate column names\"", "string_to_class_map[Text.type_string] = Text for vid in variable_types.copy(): vtype = variable_types[vid]", "been removed from variable types string_to_class_map[Text.type_string] = Text for vid", ": Whether to assert that the index is unique. \"\"\"", "df[index].cumsum() - 1 elif is_instance(df, ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence',", "self.df[variable.id].value_counts() # find how many of each unique value there", "needed. return created_index, index, df def _validate_entity_params(id, df, time_index): '''Validation", "if not self_var.__eq__(other_var, deep=True): return False return True def __sizeof__(self):", "column\".format(v.id)) # Make sure column ordering matches variable ordering self.df", "pd.DataFrame) or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0]", "{v.id: type(v) for v in self.variables} def convert_variable_type(self, variable_id, new_type,", "False if not _dataframes_equal(self.df, other.df): return False variables = {variable:", "Returns: None \"\"\" for variable in self.variables: # some heuristics", "-> type/str/dict[str -> type]]) : An entity's variable_types dict maps", "already_sorted=already_sorted) self.set_secondary_time_index(secondary_time_index) def __repr__(self): repr_out = u\"Entity: {}\\n\".format(self.id) repr_out +=", "index exists in dataframe. 
\"\"\" _validate_entity_params(id, df, time_index) created_index, index,", "> 0.05 and fraction < 0.95: if verbose: msg =", "df[[v.id for v in self.variables]] self.set_index(index) self.time_index = None if", "that secondary time \"\"\" variables = [] variable_types = variable_types.copy()", "\"\"\" for v in self.variables: if v.id == variable_id: return", "a new column of that name using integers the (0,", "and from self.variables Args: variable_ids (list[str]): Variables to delete Returns:", "type was used instead\".format(vtype)) if index not in variable_types: variable_types[index]", "not None: self.set_time_index(self.time_index, already_sorted=already_sorted) self.set_secondary_time_index(self.secondary_time_index) if recalculate_last_time_indexes and self.last_time_index is", "import make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\"", "to delete Returns: None \"\"\" # check if variable is", "new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index): \"\"\"Extracts the variables", "other.last_time_index is not None: return False elif self.last_time_index is not", "@df.setter def df(self, _df): self.data[\"df\"] = _df @property def last_time_index(self):", "sure column ordering matches variable ordering self.df = df[[v.id for", "time index column they are associated with. last_time_index (pd.Series): Time", "raise ValueError(\"Updated dataframe is missing new {} column\".format(v.id)) # Make", "Dictionary of secondary time columns that each map to a", "for each instance across all child entities. 
''' return self.data[\"last_time_index\"]", "in variables.values(): if not self_var.__eq__(other_var, deep=True): return False return True", "raise ValueError(\"All column names must be strings (Column {} \"", "ids to types (:class:`.Variable`) or type_string (str) or (type, kwargs)", "False if self.secondary_time_index != other.secondary_time_index: return False if len(self.variables) !=", "not in df.columns: raise LookupError('Time index not found in dataframe')", "by count, # and add interesting values to each variable", "new \" \"integer column\".format(index)) # Case 5: make_index with no", "sort by time variable, then by index self.df = self.df.sort_values([variable_id,", "= None if index is None: # Case 1: user", "# find how many of each unique value there are;", "ids to types (:class:`.Variable`) or type_strings (str) or (type, kwargs)", "to pass keyword arguments to the Variable. index (str): Name", "Text has been removed from variable types string_to_class_map[Text.type_string] = Text", "vtypes.Categorical) \"\"\" if convert_data: # first, convert the underlying data", "they are associated with. 
last_time_index (pd.Series): Time index of the", "if recalculate_last_time_indexes and self.last_time_index is not None: self.entityset.add_last_time_indexes(updated_entities=[self.id]) self.entityset.reset_data_description() def", "return self.data[\"df\"] @df.setter def df(self, _df): self.data[\"df\"] = _df @property", "time_type is None: raise TypeError(\"%s time index not recognized as", "beginning index_variable = [v for v in variables if v.id", "indexes\" % (self.id, time_type)) if time_index not in columns: columns.append(time_index)", "Case 3: user wanted to make index but column already", "len(counts.index) < 25: if verbose: msg = \"Variable {}: Marking", "'where'-able variables if isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO", "import logging import warnings import dask.dataframe as dd import numpy", "secondary_time_index.items(): if is_instance(self.df, (dd, ks), 'DataFrame') or self.df.empty: time_to_check =", "index not in df.columns: if not make_index: # Case 4:", "v in variables if v.id != index] def update_data(self, df,", "else: break self.entityset.reset_data_description() def delete_variables(self, variable_ids): \"\"\" Remove variables from", "isinstance(variable, vtypes.Discrete): variable.interesting_values = pd.Series(dtype=variable.entity.df[variable.id].dtype) # TODO - consider removing", "self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types, index, time_index, secondary_time_index): \"\"\"Extracts", "making sure data is sorted, reference indexes to other entities", "'''Dictionary mapping variable id's to variable types''' return {v.id: type(v)", "(self.id)) if self.entityset.time_type != time_type: raise TypeError(\"%s time index is", "dataframe to different type Args: variable_id (str) : Id of", "for c in df.columns: if not isinstance(c, str): raise ValueError(\"All", "Name of time_index column 
secondary_time_index (dict[str: [str]]): Dictionary of secondary", "relationships skip = False for r in self.entityset.relationships: if variable", "in the dataframe. time_index (str): Name of time column in", "self.df = convert_all_variable_data(df=self.df, variable_types=inferred_variable_types) # make sure index is at", "index] def update_data(self, df, already_sorted=False, recalculate_last_time_indexes=True): '''Update entity's internal dataframe,", "time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[variable_id]._default_pandas_dtype] else: time_to_check = self.df[variable_id].iloc[0] time_type = _check_time_type(time_to_check)", "if not isinstance(c, str): raise ValueError(\"All column names must be", "= Text for vid in variable_types.copy(): vtype = variable_types[vid] if", "make_index with no errors or warnings # (Case 4 also", "other.df): return False variables = {variable: (variable, ) for variable", "if isinstance(self.df, pd.DataFrame): self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None", "Entity Args: id (str): Id of Entity. df (pd.DataFrame): Dataframe", "secondary_time_index = secondary_time_index or {} self._create_variables(variable_types, index, time_index, secondary_time_index) self.df", "self._get_variable(variable_id) new_variable = new_type.create_from(variable) self.variables[self.variables.index(variable)] = new_variable def _create_variables(self, variable_types,", "Args: variable_id (str) : Id of variable to convert. 
new_type", "TODO document how vtype can be tuple vtype = inferred_variable_types[v]", "vtypes from featuretools.utils.entity_utils import ( col_is_datetime, convert_all_variable_data, convert_variable_data, get_linked_vars, infer_variable_types", "self.index != other.index: return False if self.time_index != other.time_index: return", "make_index, \"Must specify an index name if make_index is True\"", "already_sorted=False, make_index=False, verbose=False): \"\"\" Create Entity Args: id (str): Id", "is not None: return False elif self.last_time_index is not None", "\" datetime\" % (self.id)) if self.entityset.time_type is None: self.entityset.time_type =", "dataframe''' return self.df.shape def __eq__(self, other, deep=False): if self.index !=", ":class:`.Relationship`, :class:`.Variable`, :class:`.EntitySet` \"\"\" def __init__(self, id, df, entityset, variable_types=None,", "[index_variable] + [v for v in variables if v.id !=", "(subclass of `Variable`) : Type of variable to convert to.", "+= \"interesting value\" logger.info(msg.format(variable.id, idx)) variable.interesting_values = variable.interesting_values.append(pd.Series([idx])) else: fraction", "self.df = self.df.set_index(self.df[variable_id], drop=False) self.df.index.name = None if unique: assert", "the value to interesting_values if it represents more than #", "df.columns[0] elif make_index and index in df.columns: # Case 3:", "is_instance(self.df, (dd, ks), 'DataFrame'): t = time_type # skip checking", "last_time_index} self.created_index = created_index self._verbose = verbose secondary_time_index = secondary_time_index", "numeric or\" \" datetime\" % (self.id)) if self.entityset.time_type != time_type:", "def __init__(self, id, df, entityset, variable_types=None, index=None, time_index=None, secondary_time_index=None, last_time_index=None,", "ks, 'DataFrame'): df = df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df)))", "not a string)\".format(c)) if 
time_index is not None and time_index", "in self.variables: if v.id not in df.columns: raise ValueError(\"Updated dataframe", "dataframe \" \\ \"(Entity {})\".format(self.id) self.convert_variable_type(variable_id, vtypes.Index, convert_data=False) self.index =", "variable_id (string) : Name of an existing variable to set", "(dd, ks), 'DataFrame') or self.df.empty: time_to_check = vtypes.DEFAULT_DTYPE_VALUES[self[time_index]._default_pandas_dtype] else: time_to_check", "and time_index not in df.columns: raise LookupError('Time index not found", "make_ecommerce_entityset >>> es = make_ecommerce_entityset() >>> es[\"customers\"].convert_variable_type(\"engagement_level\", vtypes.Categorical) \"\"\" if", "type(v) for v in self.variables} def convert_variable_type(self, variable_id, new_type, convert_data=True,", "Columns: {})\".format( shape[0], shape[1]) return repr_out @property def shape(self): '''Shape", "is at the beginning index_variable = [v for v in", "# skip sorting else: t = vtypes.NumericTimeIndex if col_is_datetime(self.df[variable_id]): t", "None: self.entityset.time_type = time_type elif self.entityset.time_type != time_type: raise TypeError(\"%s", "(str) or (type, kwargs) to pass keyword arguments to the", "def __eq__(self, other, deep=False): if self.index != other.index: return False", "make index but did not specify column name assert not", ") for variable in self.variables} for variable in other.variables: variables[variable]", "# Case 5: make_index with no errors or warnings #", "Args: variable_id (string) : Name of an existing variable to", "type_strings (str) or (type, kwargs) to pass keyword arguments to", "len(variable_ids) == 0: return self.df = self.df.drop(variable_ids, axis=1) for v_id", "this constraints # don't add interesting values for entities in", "values for entities in relationships skip = False for r", "not specify column name assert not make_index, \"Must specify an", "if not isinstance(variable_ids, list): raise 
TypeError('variable_ids must be a list", "TODO: Remove once Text has been removed from variable types", "= df.koalas.attach_id_column('distributed-sequence', index) else: df.insert(0, index, range(len(df))) created_index = index", "# sort by time variable, then by index self.df =", "index (str): Name of id column in the dataframe. time_index", "= _df @property def last_time_index(self): ''' Time index of the", "time_type: raise TypeError(\"%s time index is %s type which differs", "than # 25% of the values we have not seen", "and stores relevant metadata and data An Entity is analogous", "v_id in variable_ids: v = self._get_variable(v_id) self.variables.remove(v) def set_time_index(self, variable_id,", "elif self.entityset.time_type != time_type: raise TypeError(\"%s time index is %s" ]
[ "== '/': return parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate =", "'/': return parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)')", "urllib.parse import urlparse import logging def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$')", "is None else True def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url) return", "= re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh, is_http):", "predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url):", "match is None else True def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url)", "def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object", "def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return match.group()", "if parsed_string[0] == '/': return parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url):", "validate_protocol_exists(is_ssh, is_http): if not is_ssh and not is_http: err_message =", "None else True def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url) return site_object.netloc", "url provided is not http(s) or ssh\" logging.critical(err_message) raise RuntimeError(err_message)", "= urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url) parsed_string", "is not http(s) or ssh\" logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url):", "urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object = 
urlparse(repo_url) parsed_string =", "match = predicate.search(repo_url) return False if match is None else", "repository url provided is not http(s) or ssh\" logging.critical(err_message) raise", "or ssh\" logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url)", "def check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return False", "re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return False if match is None", "re from urllib.parse import urlparse import logging def check_url_is_http(repo_url): predicate", "match = predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh, is_http): if not", "\"Error: repository url provided is not http(s) or ssh\" logging.critical(err_message)", "return parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match", "'', site_object.path) if parsed_string[0] == '/': return parsed_string[1:] return parsed_string", "= predicate.search(repo_url) return False if match is None else True", "def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match = predicate.search(repo_url) return False", "parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match =", "re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh, is_http): if", "site_object.path) if parsed_string[0] == '/': return parsed_string[1:] return parsed_string def", "def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return match.group()", "match is None else True def check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$')", "True def check_url_is_ssh(repo_url): predicate = 
re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return", "urlparse import logging def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match =", "check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match = predicate.search(repo_url) return False if", "site_object = urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '', site_object.path) if parsed_string[0]", "= re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return False if match is", "predicate = re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return False if match", "logging def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match = predicate.search(repo_url) return", "False if match is None else True def check_url_is_ssh(repo_url): predicate", "return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url)", "return match.group() def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url)", "predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh,", "ssh\" logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http", "else True def check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url)", "err_message = \"Error: repository url provided is not http(s) or", "= re.sub(r'\\.git$', '', site_object.path) if parsed_string[0] == '/': return parsed_string[1:]", "urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '', site_object.path) if parsed_string[0] == '/':", "check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url) validate_protocol_exists(is_ssh, is_http) return", "return False if match is None 
else True def check_url_is_ssh(repo_url):", "= urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '', site_object.path) if parsed_string[0] ==", "else True def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url) return site_object.netloc def", "predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match =", "return False if match is None else True def get_domain_name_from_http_url(repo_url):", "def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url) validate_protocol_exists(is_ssh, is_http)", "True def get_domain_name_from_http_url(repo_url): site_object = urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url):", "re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url): predicate =", "parsed_string = re.sub(r'\\.git$', '', site_object.path) if parsed_string[0] == '/': return", "not is_ssh and not is_http: err_message = \"Error: repository url", "match.group() def validate_protocol_exists(is_ssh, is_http): if not is_ssh and not is_http:", "is_ssh = check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url) validate_protocol_exists(is_ssh, is_http) return (is_ssh,", "re.compile('^https?://.*$') match = predicate.search(repo_url) return False if match is None", "= predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match", "match = predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)')", "predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh, is_http): if not is_ssh and", "is None else True def check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$') match", 
"check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$') match = predicate.search(repo_url) return False if", "= predicate.search(repo_url) return match.group() def validate_protocol_exists(is_ssh, is_http): if not is_ssh", "def validate_protocol_exists(is_ssh, is_http): if not is_ssh and not is_http: err_message", "http(s) or ssh\" logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh =", "match.group() def get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return", "= \"Error: repository url provided is not http(s) or ssh\"", "<gh_stars>10-100 import re from urllib.parse import urlparse import logging def", "logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http =", "= check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url) validate_protocol_exists(is_ssh, is_http) return (is_ssh, is_http)", "get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return match.group() def", "return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url) parsed_string = re.sub(r'\\.git$',", "get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '', site_object.path) if", "def get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '', site_object.path)", "= re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return match.group() def get_domain_name_from_ssh_url(repo_url): predicate", "re.sub(r'\\.git$', '', site_object.path) if parsed_string[0] == '/': return parsed_string[1:] return", "return match.group() def validate_protocol_exists(is_ssh, is_http): if not is_ssh and not", "get_domain_name_from_http_url(repo_url): site_object = 
urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object =", "if not is_ssh and not is_http: err_message = \"Error: repository", "not is_http: err_message = \"Error: repository url provided is not", "if match is None else True def get_domain_name_from_http_url(repo_url): site_object =", "if match is None else True def check_url_is_ssh(repo_url): predicate =", "is_ssh and not is_http: err_message = \"Error: repository url provided", "False if match is None else True def get_domain_name_from_http_url(repo_url): site_object", "import urlparse import logging def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match", "provided is not http(s) or ssh\" logging.critical(err_message) raise RuntimeError(err_message) def", "import re from urllib.parse import urlparse import logging def check_url_is_http(repo_url):", "predicate = re.compile('^https?://.*$') match = predicate.search(repo_url) return False if match", "from urllib.parse import urlparse import logging def check_url_is_http(repo_url): predicate =", "parsed_string[0] == '/': return parsed_string[1:] return parsed_string def get_repo_name_from_ssh_url(repo_url): predicate", "RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url) validate_protocol_exists(is_ssh,", "get_domain_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\@)(.*)(?=\\:)') match = predicate.search(repo_url) return match.group() def", "parsed_string def get_repo_name_from_ssh_url(repo_url): predicate = re.compile(r'(?<=\\:)(.*)(?=\\.)') match = predicate.search(repo_url) return", "site_object.netloc def get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url) parsed_string = re.sub(r'\\.git$', '',", "site_object = urlparse(repo_url) return site_object.netloc def get_repo_name_from_http_url(repo_url): site_object = urlparse(repo_url)", "is_http: err_message = \"Error: repository url 
provided is not http(s)", "is_http): if not is_ssh and not is_http: err_message = \"Error:", "and not is_http: err_message = \"Error: repository url provided is", "= re.compile('^https?://.*$') match = predicate.search(repo_url) return False if match is", "predicate.search(repo_url) return False if match is None else True def", "not http(s) or ssh\" logging.critical(err_message) raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh", "import logging def check_url_is_http(repo_url): predicate = re.compile('^https?://.*$') match = predicate.search(repo_url)", "None else True def check_url_is_ssh(repo_url): predicate = re.compile(r'^git\\@.*\\.git$') match =", "raise RuntimeError(err_message) def check_url_protocol(repo_url): is_ssh = check_url_is_ssh(repo_url) is_http = check_url_is_http(repo_url)" ]
[ "eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_,", "that is not done anything %d\" % count) def check_and_send_possible_updates(self,", "check_and_send_possible_split_updates(self, update_infos): has_execution = True while has_execution: has_execution = False", "update_status = \"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive %d", "%d\" % count) def check_possible_update_by_links(self, update_infos): has_execution = True while", "-= l_segment.vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if", "True break if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol <", "constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased = set() related_sws =", "l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id", "= self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op in link.to_adds_loop: if", "%d\" % count) def check_and_send_possible_updates(self, update_infos): has_execution = True while", "str(time() * 1000 - time_start_computing)) def process_coherent(self): send_to_sames = set()", "= True # endpoints = (link.src, link.dst) # total_vol =", "\"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s", "%d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING \\", "def update_message_queues(self, update_infos, process_update_info_func): increased = set() related_sws = set([])", "def check_possible_update_by_links(self, update_infos): has_execution = True while has_execution: has_execution =", "else: l_segment.update_status = constants.FINISH_ALL def 
are_all_moving_in_ops_finished(self, link): for u_op in", "self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op =", "False finished = False break has_no_pending_barrier = self.has_not_pending_msg() if not", "op in link.to_adds_only: if op.seg_path_id == seg_path_id: return 0 splittable,", "for sw in update_infos[key].update_nexts.keys(): if sw not in increased: self.current_notification_time[sw]", "or current_state == constants.SENT_ADDING: return False return True def is_capable(self,", "range(len(l_segment.new_seg) - 1): # self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i])) next_sw", "UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting", "misc import global_vars import time import eventlet mulog = logger.getLogger('cen_scheduler',", "set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True for pair in", "self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def", "1): next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw,", "def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1", "check_finish_update(self): count = 0 finished = True for link_segment in", "break if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints]))", "+= self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self, link, l_segment): pass def", "l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1", "NotificationMessage) update_infos = defaultdict(CenUpdateInfo) 
self.log.debug(\"handle removed msg %s\" % msg)", "% str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws def increase_processing_time(self,", "old_flows, new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints,", "find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info(\"start finding dependency", "range(len(l_segment.old_seg) - 1): # self.log.debug(\"send to: %s\" % l_segment.old_seg[i]) next_sw", "has_execution = True while has_execution: has_execution = False for l_segment", "msgs for segment %s, new_seg_length = %d\" # % (str(link_segment.seg_path_id),", "in increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0", "- self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if", "constants.SENT_ADDING: return False return True def is_capable(self, seg_path_id): # TODO:", "__init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link", "new_seg_length = %d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status ==", "finished = True for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status !=", "= constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def", "CenUpdateInfo, UpdateNext from misc import constants, logger from domain.message import", "finding dependency loop and sort updates\") mulog.info(links_by_endpoints) for sw in", "set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if 
l_segment.remove_only:", "l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id", "str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws def increase_processing_time(self, sw):", "def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def", "u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0 for", "link.to_adds_loop + link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self,", "sw in update_infos[key].update_nexts.keys(): if sw not in increased: self.current_notification_time[sw] +=", "total_vol = 0 for add_op in link.to_adds + link.to_adds_loop +", "update_info) self.log.debug(\"add message in processing update_info: %s\" % update_info) self.log.debug(\"pending", "# \"get from segment %s\" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap,", "* 1000 - time_start_computing)) def process_coherent(self): send_to_sames = set() for", "self.log.debug(\"handle removed msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id]", "= True break is_add_only = False for op in link.to_adds_only:", "def is_capable(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding or", "when adding or removing segment final_split_vol = 0 l_segment =", "True while has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values():", "= set() for key in self.to_sames.keys(): to_same = self.to_sames[key] for", "from misc import global_vars import time import eventlet mulog =", "1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, 
self.current_notification_time[sw])] += 1", "from collections import defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob", "processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for", "self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status", "msg): update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set()", "# self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for i", "def init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows):", "and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and", "for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id", "< self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\\ or", "self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for pair in l_segment.new_link_seg:", "self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id != link_segment.init_sw: next_idx =", "count) def check_possible_update_by_links(self, update_infos): has_execution = True while has_execution: has_execution", "not in increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] =", "UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): next_sw", "increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next 
= update_info.update_nexts[sw] process_update_info_func(sw, update_info)", "processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation =", "self.scheduling_mode = constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else:", "%s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count =", "CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol", "{} # for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def", "u_op in link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state", "< 1: link_segment.update_status = constants.FINISH_ALL else: # self.log.info(\"receive enough updated", "in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints,", "# pool = eventlet.GreenPool() mulog.info(\"start finding dependency loop and sort", "= l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id]", "from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\" elif link_segment.update_status", "== constants.NOTHING: count += 1 update_status = \"NOTHING\" if link_segment.update_status", "True break is_add_only = False for op in link.to_adds_only: if", "%d->%d: %f, \" # \"get from segment %s\" % (msg.src_id,", "sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def 
check_all_capable_for_link(self, link,", "len(l_segment.old_seg) > 1: for i in range(len(l_segment.old_seg) - 1): #", "ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values(): if link.src ==", "False return True def is_capable(self, seg_path_id): # TODO: Update remaining_vol_of_loop", "== constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING: update_status", "three properties are used for parallel processes ########## self.no_of_pending_msgs =", "# links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for", "seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol >", "self.current_processing_time[sw])] > 0 # def check_all_capable_for_link(self, link, executable_segments_by_link): # capable_segments", "True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id if not", "super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg =", "\"SENT REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif", "for x in self.switches} ########### End three properties are used", "done_loop = False # total_vol += l_segment.vol # # def", "= False self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link", "l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution = True while", "+= 1 update_status = \"NOTHING\" if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must", "link.avail_cap < l_segment.vol)\\ or 
(is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap", "> 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst)", "1 update_status = \"NOTHING\" if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive", "is not finished! update_status %s.\" % (str(link_segment.seg_path_id), update_status)) # return", "are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds + link.to_adds_loop: current_state =", "self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg %s\" %", "self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif link_segment.update_status", "l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): # self.log.debug(\"send", "in self.to_sames.keys(): to_same = self.to_sames[key] for sw in to_same: send_to_sames.add(sw)", "l_segment.flow_src, l_segment.flow_dst) for sw in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw,", "in link.to_adds_only: if op.seg_path_id == seg_path_id: return 0 splittable, split_vol", "l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): next_sw =", "if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive %d more UPDATED msgs\"", "defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are used", "link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id != link_segment.init_sw:", "not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle 
updated msg %s\"", "l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair]", "1: link_segment.update_status = constants.FINISH_ALL else: # self.log.info(\"receive enough updated msgs", "+= l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link =", "self.log.info(\"time to compute dependency graph: %s\" % str(time() * 1000", "dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f,", "executable_segments_by_link): # capable_segments = [] # done_loop = True #", "import ez_flow_tool from collections import defaultdict from ez_scheduler import EzScheduler", "if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug(\"number of flows", "in link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state ==", "receive %d more UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\"", "link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING", "= time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock() def", "+= 1 def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])]", "# l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1:", "to_same = self.to_sames[key] for sw in to_same: send_to_sames.add(sw) # for", "from starting\" % (update_info, (time() - self.current_start_time)*1000)) assert update_info, CenUpdateInfo", "# % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == 
constants.SENT_ADDING \\ and", "not finished! update_status %s.\" % (str(link_segment.seg_path_id), update_status)) # return False", "msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def", "self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def check_all_capable_for_link(self, link, executable_segments_by_link): #", "in l_segment.new_link_seg: self.log.info(\"avail_cap of link %s: %f, \" \"give %f", "key in self.to_sames.keys(): to_same = self.to_sames[key] for sw in to_same:", "self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING elif not finished: self.log.debug(\"number", "in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count", "(link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and link.avail_cap", "self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg %s\" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True", "capable_segments = [] # done_loop = True # endpoints =", ">= len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\" %", "in self.no_of_pending_msgs.values(): if queue_len > 0: return False return True", "%s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to compute dependency", "% update_info) self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw +", "new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id,", "self.log.info(\"receive updated msgs for segment %s, new_seg_length = %d\" #", "constants.NOTHING: # done_loop = False # total_vol += 
l_segment.vol #", "l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol =", "links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for link", "for u_op in link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if", "if splittable and final_split_vol > split_vol > 0: final_split_vol =", "pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap of", "link_segment.update_status != constants.FINISH_ALL: update_status = '' if link_segment.update_status == constants.NOTHING:", "if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw", "defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated", "if link_segment.update_status != constants.FINISH_ALL: update_status = '' if link_segment.update_status ==", "% count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode =", "update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap", "msg.src_id != link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1 if next_idx", "!= constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for", "self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] =", "l_segment.new_seg[i + 1] 
update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] =", "self.received_removed_msg = defaultdict() ########## Begin three properties are used for", "segments_by_seg_path_id): for link in links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link,", "%s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0", "+ 1]) return related_sws def increase_processing_time(self, sw): self.current_processing_time[sw] += 1", "= set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,", "constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return", "update_infos): # executable_segments_by_link = {} # executable_link_by_segments = {} #", "final_split_vol = split_vol self.log.debug(\"capable %s\" % l_segment) return final_split_vol def", "self.logger.info(\"Process update info %s at %d ms from starting\" %", "str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count = 0 finished", "for op in link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only =", "increased = set() related_sws = set([]) for key in update_infos.keys():", "return msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0", "links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values(): if link.src == sw:", "% self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING:", "self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, 
self.current_notification_time[sw])] = 0 #update_next =", "# TODO: Update remaining_vol_of_loop when adding or removing segment l_segment", "import itertools from ez_lib import ez_flow_tool from collections import defaultdict", "domain.message import * from collections import deque from misc import", "link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs for segment %s,", "Update remaining_vol_of_loop when adding or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id]", "= UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos,", "# pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) #", "update_status = \"SENT REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING: update_status =", "\\ or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\\ or (is_add_only and", "= 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING:", "adding or removing segment final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id]", "sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg", "%s\" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id])", "l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds", "msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx =", "== l_segment.seg_path_id: 
self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0 for l_segment", "time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints,", "if link_segment.update_status == constants.NOTHING: count += 1 update_status = \"NOTHING\"", "l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT)", "0 for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol", "update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i", "update_infos) return update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time()", "(str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in", "adding or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in", "l_segment) if splittable and final_split_vol > split_vol > 0: final_split_vol", "split_vol self.log.debug(\"capable %s\" % l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos):", "from ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from", "link in links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id)", "in range(len(l_segment.new_seg) - 1): # self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i]))", "(is_dependency_loop_op and link.avail_cap < l_segment.vol)\\ or (is_add_only and 
(not self.are_all_moving_in_ops_finished(link)", "from misc import constants, logger from domain.message import * from", "mulog.info(\"links by endpoints %s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time", "== constants.NOTHING: # done_loop = False # total_vol += l_segment.vol", "set() for key in self.to_sames.keys(): to_same = self.to_sames[key] for sw", "updated msgs for segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING", "= time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation)", "assert update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if sw not", "related_sws def increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw,", "def check_and_send_possible_split_updates(self, update_infos): has_execution = True while has_execution: has_execution =", "= set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True for pair", "segments_by_seg_path_id) # pool.waitall() # for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link,", "= constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for", "# self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol = 0 for", "segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol", "self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL", "= 0 if msg.src_id != link_segment.init_sw: next_idx = 
link_segment.old_seg.index(msg.src_id) +", "continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol", "False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id", "= (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap of link", "True def is_capable(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding", "processing update_info: %s\" % update_info) self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs))", "= True def __str__(self): return \"Centralized Controller\" @staticmethod def init_logger():", "len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return update_infos def finish_adding_new_path(self,", "self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] =", "self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if", "= {x: -1 for x in self.switches} ########### End three", "l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if", "Update remaining_vol_of_loop when adding or removing segment final_split_vol = 0", "str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for i in range(len(l_segment.old_seg) -", "return total_vol def check_to_split(self, link, l_segment): pass def splittable_vol(self, seg_path_id):", "self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return", "########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x", 
"self.switches} self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time", "= %d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING", "l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id]", "release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id]", "update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in old_sws: update_infos[seg_path_id].update_nexts[sw]", "is_add_only = False for op in link.to_adds_only: if op.seg_path_id ==", "+= 1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] +=", "# executable_link_by_segments = {} # for link in self.links_by_endpoints.values(): #", "elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive %d more REMOVED msgs\"", "1 return msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] >", "update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s is not finished! 
update_status %s.\"", "global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos): for", "self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol = 0 for add_op", "(str(link_segment.seg_path_id), update_status)) # return False finished = False break has_no_pending_barrier", "x in self.switches} self.current_notification_time = {x: -1 for x in", "# if l_segment.update_status == constants.NOTHING: # done_loop = False #", "sw in to_same: send_to_sames.add(sw) # for sw in send_to_sames: #", "pool = eventlet.GreenPool() mulog.info(\"start finding dependency loop and sort updates\")", "= False # total_vol += l_segment.vol # # def check_and_send_possible_update_by_link(self,", "that is not done anything %d\" % count) def check_and_do_next_update(self,", "return False finished = False break has_no_pending_barrier = self.has_not_pending_msg() if", "sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time", "link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values():", "update_status = \"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\"", "def process_coherent(self): send_to_sames = set() for key in self.to_sames.keys(): to_same", "= defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def __str__(self):", "from segment %s\" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id)))", "self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count", "= {x: deque([]) for x in 
self.switches} self.current_notification_time = {x:", "sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op", "\\ or current_state == constants.SENT_ADDING: return False return True def", "final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution = True while has_execution: has_execution", "str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id", "to sw%s\" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]]", "1 if next_idx < len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair =", "False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue", "enough updated msgs for segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status =", "related_sws = set([]) for key in update_infos.keys(): update_info = update_infos[key]", "\\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return update_infos def finish_adding_new_path(self, link_segment, update_infos):", "mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if", "split_vol > 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,", "constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING: update_status =", "at %d ms from starting\" % (update_info, (time() - self.current_start_time)*1000))", "else: # self.log.info(\"receive enough updated msgs for segment %s\" %", "1): # self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i", "= constants.SENT_REMOVING else: 
l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for", "= {} # executable_link_by_segments = {} # for link in", "constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i", "= self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \\ or current_state ==", "= \"SENT REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING: update_status = \"FINISH_ADDING\"", "+= link_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \" #", "finished = False break has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier:", "= {} self.notification_queues = {x: deque([]) for x in self.switches}", "update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id", "constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for u_op", "and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)): return False self.log.debug(\"capable", "self.log.debug(\"add message in processing update_info: %s\" % update_info) self.log.debug(\"pending messages:", "self.log.debug(\"segment %s is not finished! 
update_status %s.\" % (str(link_segment.seg_path_id), update_status))", "def check_to_split(self, link, l_segment): pass def splittable_vol(self, seg_path_id): # TODO:", "= self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING: # done_loop =", "self.current_processing_time = {x: -1 for x in self.switches} ########### End", "link %s: %f, \" \"give %f to segment %s\" %", "= CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap +=", "True for pair in l_segment.new_link_seg: self.log.info(\"avail_cap of link %s: %f,", "%f, \" \"give %f to segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap,", "constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status =", "update_info: %s\" % update_info) self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw)", "\" # \"get from segment %s\" % (msg.src_id, dst, #", "{x: deque([]) for x in self.switches} self.current_notification_time = {x: -1", "= l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol > 0:", "for queue_len in self.no_of_pending_msgs.values(): if queue_len > 0: return False", "self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol = 0", "self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for", "ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) 
self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id)", "i in range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i + 1]", "if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def", "< l_segment.vol)): return False self.log.debug(\"capable %s\" % l_segment) return True", "= set([]) for key in update_infos.keys(): update_info = update_infos[key] #", "# done_loop = False # total_vol += l_segment.vol # #", "in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock()", "messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws", "link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg,", "finished: self.log.debug(\"number of flows that is not done anything %d\"", "0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link =", "not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in", "update_status = \"NOTHING\" if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive %d", "constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK", "is not done anything %d\" % count) def check_and_send_possible_updates(self, update_infos):", "'' if link_segment.update_status == constants.NOTHING: count += 1 update_status =", "split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if not update_infos.has_key(seg_path_id):", "= True break if (not is_dependency_loop_op 
and (link.avail_cap - l_segment.vol", "total_pending_cycle_vol(self, link): total_vol = 0 for add_op in link.to_adds +", "= 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link", "def __str__(self): return \"Centralized Controller\" @staticmethod def init_logger(): return logger.getLogger(\"Centralized", "not done anything %d\" % count) def check_and_do_next_update(self, msg): update_infos", "in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id]", "self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add", "to_same: send_to_sames.add(sw) # for sw in send_to_sames: # msg =", "link_segment.update_status == constants.NOTHING: count += 1 update_status = \"NOTHING\" if", "if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair =", "constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s is not finished! 
update_status", "= l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst)", "in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op", "= constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage)", "link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive %d more UPDATED msgs\" %", "def create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows,", "<reponame>AlsikeE/Ez import itertools from ez_lib import ez_flow_tool from collections import", "update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status =", "anything %d\" % count) def check_possible_update_by_links(self, update_infos): has_execution = True", "self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if not", "= self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id]", "= (link.src, link.dst) # total_vol = 0 # for op", "self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count =", "update_infos, process_update_info_func): increased = set() related_sws = set([]) for key", "l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING: # done_loop", "Controller\" @staticmethod def init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self,", "constants.LOG_LEVEL) 
class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches = switches_", "def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {} # executable_link_by_segments =", "% (str(link_segment.seg_path_id), update_status)) # return False finished = False break", "key in update_infos.keys(): update_info = update_infos[key] # self.logger.info(\"Process update info", "self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info(\"start", "when adding or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints", "= set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL def", "# return False finished = False break has_no_pending_barrier = self.has_not_pending_msg()", "= {} # for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link)", "for key in update_infos.keys(): update_info = update_infos[key] # self.logger.info(\"Process update", "l_segment.vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status", "msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw): msg", "# global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for", "self.log.debug(\"send to: %s\" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1]", "current_time: global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment in", "for i in range(len(l_segment.new_seg) - 1): # self.log.debug(\"send to sw%s\"", "set() l_segment.update_status = constants.SENT_ADDING for 
pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -=", "switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link =", "== constants.NOTHING \\ or current_state == constants.SENT_ADDING: return False return", "def check_and_send_possible_updates(self, update_infos): has_execution = True while has_execution: has_execution =", "seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not", "else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs))", "update_infos = defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id)", "return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id if", "\" \"give %f to segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol,", "check_to_split(self, link, l_segment): pass def splittable_vol(self, seg_path_id): # TODO: Update", "End three properties are used for parallel processes ########### self.to_sames", "flows that is not done anything %d\" % count) self.scheduling_mode", "% (update_info, (time() - self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw", "from ez_lib import ez_flow_tool from collections import defaultdict from ez_scheduler", "= self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return", "< l_segment.vol)\\ or (is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap <", "= eventlet.GreenPool() mulog.info(\"start finding dependency loop and sort updates\") mulog.info(links_by_endpoints)", "constants.FINISH_ADDING 
self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos", "update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status =", "import * from collections import deque from misc import global_vars", "= constants.FINISH_ALL else: # self.log.info(\"receive enough updated msgs for segment", "[] # done_loop = True # endpoints = (link.src, link.dst)", "l_segment.new_link_seg: self.log.info(\"avail_cap of link %s: %f, \" \"give %f to", "global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values():", "(self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to compute dependency graph: %s\" % str(time()", "self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg def has_pending_msg_of_sw(self, sw):", "%s\" % (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if", "u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol", "== constants.NOTHING: count += 1 self.log.debug(\"number of flows that is", "- 1): next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,", "if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol", "set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg %s\" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id),", "sw in old_sws: 
update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status =", "%s is not finished! update_status %s.\" % (str(link_segment.seg_path_id), update_status)) #", "for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -=", "= 0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ +=", "l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap", "= logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches", "* 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id)", "= time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws", "time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self,", "self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING: # done_loop = False", "receive %d more REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\"", "time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames,", "or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg:", "self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs for segment 
%s, new_seg_length =", "CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if sw not in increased:", "self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft()", "== seg_path_id: is_dependency_loop_op = True break is_add_only = False for", "= self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only", "not done anything %d\" % count) def check_possible_update_by_links(self, update_infos): has_execution", "!= constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or", "self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] =", "misc import constants, logger from domain.message import * from collections", "l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True for pair in l_segment.new_link_seg:", "= \"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive %d more", "msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id", "links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def", "EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import constants,", "l_segment.flow_dst) for sw in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT)", "% msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if", "are used for 
parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock", "is_add_only = True break if (not is_dependency_loop_op and (link.avail_cap -", "% count) def check_possible_update_by_links(self, update_infos): has_execution = True while has_execution:", "self.log.info(\"receive enough updated msgs for segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status", "\"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive %d more REMOVED", "in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id", "len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap +=", "for segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment)", "if (not is_dependency_loop_op and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\", "set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,", "ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import constants, logger from", "== sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time < current_time:", "segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id):", "in self.switches} ########### End three properties are used for parallel", "find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values(): if link.src", "of link %s: %f, \" \"give %f to segment %s\"", "l_segment.flow_dst) 
update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for", "self.log.debug(\"handle updated msg %s\" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment", "that is not done anything %d\" % count) def check_possible_update_by_links(self,", "# str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for i in range(len(l_segment.old_seg)", "self.finish_adding_new_path(link_segment, update_infos) return update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] =", "continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if", "== constants.SENT_ADDING: self.log.debug(\"must receive %d more UPDATED msgs\" % (len(link_segment.new_seg)-1))", "msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status =", "(time() - self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys():", "l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds", "remaining_vol_of_loop when adding or removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for", "mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints,", "% count) def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if not", "constants.FINISH_ALL else: # self.log.info(\"receive enough updated msgs for segment %s\"", "0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol", "= 
link_segment.old_seg.index(msg.src_id) + 1 if next_idx < len(link_segment.old_seg): dst =", "\"NOTHING\" if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive %d more UPDATED", "queue_len in self.no_of_pending_msgs.values(): if queue_len > 0: return False return", "= self.to_sames[key] for sw in to_same: send_to_sames.add(sw) # for sw", "msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg def", "- l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and link.avail_cap <", "starting\" % (update_info, (time() - self.current_start_time)*1000)) assert update_info, CenUpdateInfo for", "= constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode =", "current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \\ or current_state", "return constants.ON_GOING elif not finished: self.log.debug(\"number of flows that is", "self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg %s\" % msg)", "constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" %", "= self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs for segment %s, new_seg_length", "by endpoints %s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to", "self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos =", "= l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not 
update_infos.has_key(seg_path_id):", "constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if", "return False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id =", "elif link_segment.update_status == constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif link_segment.update_status ==", "== constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos)", "def total_pending_cycle_vol(self, link): total_vol = 0 for add_op in link.to_adds", "%s: %f, \" \"give %f to segment %s\" % (str(pair),", "count) def check_and_send_possible_updates(self, update_infos): has_execution = True while has_execution: has_execution", "self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for link in links_by_endpoints.values():", "self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive", "in links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock()", "done anything %d\" % count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK", "split_vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status", "1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw,", "# ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self,", "in range(len(l_segment.new_seg) - 1): next_sw 
= l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]]", "process_coherent(self): send_to_sames = set() for key in self.to_sames.keys(): to_same =", "= \"NOTHING\" if link_segment.update_status == constants.SENT_ADDING: self.log.debug(\"must receive %d more", "self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,", "in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only = False for op", "l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {}", "self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)): return False self.log.debug(\"capable %s\" %", "for link in links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints,", "< len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap", "properties are used for parallel processes ########### self.to_sames = defaultdict(list)", "False break has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING", "info %s at %d ms from starting\" % (update_info, (time()", "self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK", "= self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg def has_pending_msg_of_sw(self,", "TODO: Update remaining_vol_of_loop when adding or removing segment l_segment =", "if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst)", "= CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in 
old_sws: update_infos[seg_path_id].update_nexts[sw] =", "%s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws def", "pass def splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding", "while has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values(): if", "op in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status", "if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0", "update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] =", "or link.avail_cap < l_segment.vol)): return False self.log.debug(\"capable %s\" % l_segment)", "% (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT", "-= 1 return msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])]", "> 0: final_split_vol = split_vol self.log.debug(\"capable %s\" % l_segment) return", "l_segment.update_status == constants.NOTHING: # done_loop = False # total_vol +=", "links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for link in links_by_endpoints.values(): #", "self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count = 0 finished =", "send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg,", "len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) == \\", "= set() l_segment.update_status = constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap", "if next_idx < 
len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair = (msg.src_id,", "message in processing update_info: %s\" % update_info) self.log.debug(\"pending messages: %s\"", "sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased", "== constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s is not finished!", "l_segment): seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,", "for sw in to_same: send_to_sames.add(sw) # for sw in send_to_sames:", "#self.datapaths[sw + 1]) return related_sws def increase_processing_time(self, sw): self.current_processing_time[sw] +=", "done anything %d\" % count) def check_possible_update_by_links(self, update_infos): has_execution =", "= 0 for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only:", "op.seg_path_id == seg_path_id: is_dependency_loop_op = True break is_add_only = False", "link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op in link.to_adds_loop:", "links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link", "seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src,", "CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0],", "l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id]", "in to_same: send_to_sames.add(sw) # for sw in send_to_sames: # msg", "= self.links_by_endpoints[endpoints] is_add_only = False for op in 
link.to_adds_only: if", "== constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending", "= \"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment", "= [] # done_loop = True # endpoints = (link.src,", "self.log.debug(\"capable %s\" % l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution", "self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode", "used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues =", "has_execution: has_execution = False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status", "self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for i in", "link = self.links_by_endpoints[endpoints] is_add_only = False for op in link.to_adds_only:", "def __init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_)", "segment final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in", "pool.waitall() # for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id)", "pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall()", "= True for pair in l_segment.new_link_seg: self.log.info(\"avail_cap of link %s:", "0 if msg.src_id != link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1", "link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1 if 
next_idx < len(link_segment.old_seg):", "of flows that is not done anything %d\" % count)", "collections import deque from misc import global_vars import time import", "0 finished = True for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status", "+= 1 self.log.debug(\"number of flows that is not done anything", "not done anything %d\" % count) def check_and_send_possible_updates(self, update_infos): has_execution", "in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if", "sw not in increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])]", "for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id)", "op.seg_path_id == seg_path_id: is_add_only = True break if (not is_dependency_loop_op", "anything %d\" % count) def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo)", "% l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution = True", "link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos", "total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self, link, l_segment): pass", "constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op", "import CenUpdateInfo, UpdateNext from misc import constants, logger from domain.message", "split_vol = self.check_to_split(link, l_segment) if splittable and final_split_vol > split_vol", "from collections import deque from misc import global_vars import time", "== \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, 
update_infos) return update_infos def finish_adding_new_path(self, link_segment,", "0: final_split_vol = split_vol self.log.debug(\"capable %s\" % l_segment) return final_split_vol", "self.log.debug(\"must receive %d more REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from:", "current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE:", "link in links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time =", "str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw,", "# for sw in send_to_sames: # msg = NotificationMessage(0, sw,", "= constants.SENT_ADDING l_segment.is_splitting = True for pair in l_segment.new_link_seg: self.log.info(\"avail_cap", "= {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin", "final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg:", "for segment %s, new_seg_length = %d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg)))", "-1 for x in self.switches} self.current_processing_time = {x: -1 for", "= constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds +", "logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches =", "if msg.src_id != link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1 if", "from domain.message import * from collections import deque from misc", "1 def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -=", "seg_path_id: is_dependency_loop_op = True break is_add_only = False for op", "(link.src, link.dst) # total_vol = 0 # 
for op in", "(len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif", "return True def is_capable(self, seg_path_id): # TODO: Update remaining_vol_of_loop when", "link, l_segment): pass def splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop", "+= l_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \" #", "# total_vol = 0 # for op in link.to_adds_loop: #", "0 splittable, split_vol = self.check_to_split(link, l_segment) if splittable and final_split_vol", "if sw not in increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw,", "import deque from misc import global_vars import time import eventlet", "constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True", "split_vol > 0: final_split_vol = split_vol self.log.debug(\"capable %s\" % l_segment)", "constants.SENT_REMOVING: self.log.debug(\"must receive %d more REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received", "three properties are used for parallel processes ########### self.to_sames =", "sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw,", "constants.NOTHING: count += 1 self.log.debug(\"number of flows that is not", "(is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)): return False", "l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0 for l_segment in", "time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws =", "logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): 
time_start_computing = time.time()", "sw%s\" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] =", "= defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties are", "% str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id,", "for key in self.to_sames.keys(): to_same = self.to_sames[key] for sw in", "update_infos, l_segment): seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] =", "REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif link_segment.update_status", "UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for", "update_infos.keys(): update_info = update_infos[key] # self.logger.info(\"Process update info %s at", "(msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg)", "def check_finish_update(self): count = 0 finished = True for link_segment", "sw, links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values(): if link.src ==", "(l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap of link %d->%d:", "current_time = time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock()", "if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0", "and sort updates\") mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw,", "import time import eventlet mulog = 
logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler):", "= self.check_to_split(link, l_segment) if splittable and final_split_vol > split_vol >", "ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw,", "done anything %d\" % count) def check_and_do_next_update(self, msg): update_infos =", "% (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg)", "self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\" elif link_segment.update_status == constants.FINISH_ADDING: update_status", "self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info", "= 0 # for op in link.to_adds_loop: # l_segment =", "% count) def check_and_send_possible_updates(self, update_infos): has_execution = True while has_execution:", "deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return", "count = 0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status ==", "# capable_segments = [] # done_loop = True # endpoints", "in self.switches} self.current_processing_time = {x: -1 for x in self.switches}", "l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only = False for op in", "= self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op", "l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op in", "flows that is not done anything %d\" % count) def", "% str(time() * 1000 - 
time_start_computing)) def process_coherent(self): send_to_sames =", "op in link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only = True", "linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id]", "next_idx = link_segment.old_seg.index(msg.src_id) + 1 if next_idx < len(link_segment.old_seg): dst", "more REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id])", "count += 1 self.log.debug(\"number of flows that is not done", "%d more REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" %", "op.seg_path_id == seg_path_id: return 0 splittable, split_vol = self.check_to_split(link, l_segment)", "= l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id)", "ez_flow_tool from collections import defaultdict from ez_scheduler import EzScheduler from", "to compute dependency graph: %s\" % str(time() * 1000 -", "l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased = set()", "(not is_dependency_loop_op and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or", "CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0,", "= self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING elif not finished:", "add_op in link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol", "self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id ==", "%s\" % msg) assert 
self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] #", "= set() related_sws = set([]) for key in update_infos.keys(): update_info", "in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol =", "import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import", "assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs", "dst = link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol", "# executable_segments_by_link = {} # executable_link_by_segments = {} # for", "= update_infos[key] # self.logger.info(\"Process update info %s at %d ms", "True for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: update_status", "= False for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING:", "link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol", "link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool", "update_infos[key] # self.logger.info(\"Process update info %s at %d ms from", "= set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if", "for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x:", "sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) 
self.find_dependency_loop_and_sort_updates_by_sw(sw,", "self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i + 1]", "link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time <", "constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:", "= defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment", "removed msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx", "update_infos): has_execution = True while has_execution: has_execution = False for", "deque from misc import global_vars import time import eventlet mulog", "= 0 finished = True for link_segment in self.segments_by_seg_path_id.values(): if", "related_sws.add(sw) #self.datapaths[sw + 1]) return related_sws def increase_processing_time(self, sw): self.current_processing_time[sw]", "import constants, logger from domain.message import * from collections import", "check_and_send_possible_updates(self, update_infos): has_execution = True while has_execution: has_execution = False", "return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\"", "self.no_of_pending_msgs.values(): if queue_len > 0: return False return True def", "set([]) for key in update_infos.keys(): update_info = update_infos[key] # self.logger.info(\"Process", "- 1): # self.log.debug(\"send to: %s\" % l_segment.old_seg[i]) next_sw =", "in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status ==", "+ link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def 
check_to_split(self, link,", "constants.FINISH_ALL: update_status = '' if link_segment.update_status == constants.NOTHING: count +=", "dependency loop and sort updates\") mulog.info(links_by_endpoints) for sw in self.switches:", "# str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status =", "if queue_len > 0: return False return True def release_capacity_send_remove_msg_to_old_segment(self,", "are used for parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues", "def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id]", "%f, \" # \"get from segment %s\" % (l_segment.init_sw, #", "########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True", "%s\" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] =", "(l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) >", "collections import defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob import", "for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def", "def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id = l_segment.seg_path_id if not update_infos.has_key(seg_path_id):", "link_segment) def remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo)", "l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id) if split_vol > 0: if", "link_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \" # 
\"get", "old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def", "= l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id]", "self.links_by_endpoints[endpoints] is_add_only = False for op in link.to_adds_only: if op.seg_path_id", "new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links", "\"FINISH_REMOVING\" self.log.debug(\"segment %s is not finished! update_status %s.\" % (str(link_segment.seg_path_id),", "+= 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next = update_info.update_nexts[sw]", "- time_start_computing)) def process_coherent(self): send_to_sames = set() for key in", "u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0 for", "op in link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op = True", "True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg =", "self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by", "update_info) self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw + 1])", "has_execution = False for l_segment in 
self.segments_by_seg_path_id.values(): if l_segment.update_status !=", "%d more UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" %", "Begin three properties are used for parallel processes ########## self.no_of_pending_msgs", "l_segment.vol)\\ or (is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)):", "constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() * 1000", "self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol count = 0 for l_segment in self.segments_by_seg_path_id.values():", "self.log.info(\"avail_cap of link %d->%d: %f, \" # \"get from segment", "itertools from ez_lib import ez_flow_tool from collections import defaultdict from", "next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT)", "l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if self.is_capable(l_segment.seg_path_id)", "in update_infos.keys(): update_info = update_infos[key] # self.logger.info(\"Process update info %s", "if len(l_segment.old_seg) > 1: for i in range(len(l_segment.old_seg) - 1):", "and link.avail_cap < l_segment.vol)\\ or (is_add_only and (not self.are_all_moving_in_ops_finished(link) or", "if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info =", "l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f,", "link.dst) # total_vol = 0 # for op in link.to_adds_loop:", "%d\" % count) def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if", "self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in link.to_adds_loop: 
self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)]", "not done anything %d\" % count) self.scheduling_mode = constants.CONGESTION_MODE return", "total_vol += l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link", "is_dependency_loop_op = True break is_add_only = False for op in", "parallel processes ########## self.no_of_pending_msgs = {} self.notification_queues = {x: deque([])", "self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] =", "self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw): msg = self.notification_queues[sw].popleft() self.no_of_pending_msgs[(sw,", "in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug(\"number", "0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] =", "dependency graph: %s\" % str(time() * 1000 - time_start_computing)) def", "\"Centralized Controller\" @staticmethod def init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def", "next_idx < len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair = (msg.src_id, dst)", "update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw]", "for sw in send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG,", "self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs for", "# msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw)", "constants.NOTHING: count += 1 update_status = 
\"NOTHING\" if link_segment.update_status ==", "def check_all_capable_for_link(self, link, executable_segments_by_link): # capable_segments = [] # done_loop", "old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if", "return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def check_all_capable_for_link(self, link, executable_segments_by_link):", "seg_path_id: return 0 splittable, split_vol = self.check_to_split(link, l_segment) if splittable", "0: return False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment): seg_path_id", "self.links_by_endpoints[pair].avail_cap += l_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \"", "in processing update_info: %s\" % update_info) self.log.debug(\"pending messages: %s\" %", "def splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding or", "link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time =", "self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three properties", "constants.SENT_ADDING l_segment.is_splitting = True for pair in l_segment.new_link_seg: self.log.info(\"avail_cap of", "in link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return", "set() related_sws = set([]) for key in update_infos.keys(): update_info =", "self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] -= 1 return msg def has_pending_msg_of_sw(self, sw): return", "splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding or removing", "(msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap of link %d->%d:", "l_segment.old_seg[0], # 
self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id))) if len(l_segment.old_seg) > 1: for", "# self.log.debug(\"send to: %s\" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i +", "\" # \"get from segment %s\" % (l_segment.init_sw, # l_segment.old_seg[0],", "= False break has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier: return", "1 self.log.debug(\"number of flows that is not done anything %d\"", "update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg)", "from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc import constants, logger", "= True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg", "update_infos def check_finish_update(self): count = 0 finished = True for", "is not done anything %d\" % count) self.scheduling_mode = constants.CONGESTION_MODE", "segment %s\" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if", "constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE", "0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info", "% str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg):", "set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self,", "or removing segment final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for", "endpoints in l_segment.new_link_seg: link = 
self.links_by_endpoints[endpoints] is_add_only = False for", "used for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock =", "in link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op = True break", "False for op in link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only", "%s\" % str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if queue_len >", "self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock:", "NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link):", "self.to_sames[key] for sw in to_same: send_to_sames.add(sw) # for sw in", "+ link.to_adds_loop + link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def", "self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id ==", "update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if sw not in", "True def __str__(self): return \"Centralized Controller\" @staticmethod def init_logger(): return", "sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values(): if link.src", "anything %d\" % count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else:", "= {x: -1 for x in self.switches} self.current_processing_time = {x:", "if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish", "False for op in link.to_adds_only: if op.seg_path_id == seg_path_id: return", "return False return True def is_capable(self, seg_path_id): # TODO: Update", "True link_segment = 
self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated msgs for segment", "next_sw = l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT)", "or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\\ or (is_add_only and (not", "# self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0", "sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw):", "do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s segs_by_segpath_id", "link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only = True break if", "self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\\ or (is_add_only", "link %d->%d: %f, \" # \"get from segment %s\" %", "self.encounter_deadlock = False self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset()", "self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): #", "constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for pair in", "is_dependency_loop_op = False for op in link.to_adds_loop: if op.seg_path_id ==", "self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: update_status = '' if link_segment.update_status", "0 # def check_all_capable_for_link(self, 
link, executable_segments_by_link): # capable_segments = []", "finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if len(link_segment.old_seg)", "has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values():", "isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg %s\" %", "\\ and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return update_infos", "% (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op", "segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def", "* from collections import deque from misc import global_vars import", "for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time", "add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self,", "elif not finished: self.log.debug(\"number of flows that is not done", "+ 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set()", "self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])]", "# TODO: Update remaining_vol_of_loop when 
adding or removing segment final_split_vol", "= CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id,", "link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id):", "-1 for x in self.switches} ########### End three properties are", "# total_vol += l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos): #", "0 for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count", "increased: self.current_notification_time[sw] += 1 increased.add(sw) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] = 0 #update_next", "l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id =", "if op.seg_path_id == seg_path_id: is_add_only = True break if (not", "* 1000 if len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL else:", "self.check_to_split(link, l_segment) if splittable and final_split_vol > split_vol > 0:", "= constants.NORMAL_MODE if current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return", "== seg_path_id: is_add_only = True break if (not is_dependency_loop_op and", "l_segment.remove_only: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for", "if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) if", "msg %s\" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id]", "assert isinstance(msg, NotificationMessage) update_infos 
= defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg %s\"", "UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status", "%d\" % count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode", "is_dependency_loop_op and (link.avail_cap - l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op", "self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self, link, l_segment): pass def splittable_vol(self,", "%s\" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self,", "for pair in l_segment.new_link_seg: self.log.info(\"avail_cap of link %s: %f, \"", "if op.seg_path_id == seg_path_id: is_dependency_loop_op = True break is_add_only =", "%d ms from starting\" % (update_info, (time() - self.current_start_time)*1000)) assert", "link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap", "msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 #", "update_status)) # return False finished = False break has_no_pending_barrier =", "== sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in links_by_endpoints.values(): if", "pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop:", "self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw in update_infos[key].update_nexts.keys(): if sw", "executable_segments_by_link = {} # executable_link_by_segments = {} # for link", 
"update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) -", "link.dst)] = 0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] \\", "sort updates\") mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw,", "self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment = self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id !=", "link.avail_cap < l_segment.vol)): return False self.log.debug(\"capable %s\" % l_segment) return", "and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) return update_infos def", "+ link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \\", "str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL", "finished! update_status %s.\" % (str(link_segment.seg_path_id), update_status)) # return False finished", "= \"FINISH_REMOVING\" self.log.debug(\"segment %s is not finished! 
update_status %s.\" %", "segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link =", "%s.\" % (str(link_segment.seg_path_id), update_status)) # return False finished = False", "for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only = False", "True # endpoints = (link.src, link.dst) # total_vol = 0", "self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\" elif", "pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap of", "1000 if len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL else: #", "ms from starting\" % (update_info, (time() - self.current_start_time)*1000)) assert update_info,", "self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) #", "final_split_vol > split_vol > 0: final_split_vol = split_vol self.log.debug(\"capable %s\"", "total_vol def check_to_split(self, link, l_segment): pass def splittable_vol(self, seg_path_id): #", "not has_no_pending_barrier: return constants.ON_GOING elif not finished: self.log.debug(\"number of flows", "= True while has_execution: has_execution = False for l_segment in", "loop and sort updates\") mulog.info(links_by_endpoints) for sw in self.switches: #", "has_no_pending_barrier: return constants.ON_GOING elif not finished: self.log.debug(\"number of flows that", "old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set()", "has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING elif not", "range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i + 1] update_info.update_nexts[l_segment.new_seg[i]] =", 
"segments_by_seg_path_id) for link in links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link)", "+= self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool()", "in self.switches} self.current_notification_time = {x: -1 for x in self.switches}", "constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode", "self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_add_only =", "0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message in processing", "sw in send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0)", "if split_vol > 0: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,", "self.current_notification_time = {x: -1 for x in self.switches} self.current_processing_time =", "{} self.notification_queues = {x: deque([]) for x in self.switches} self.current_notification_time", "for l_segment in self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count +=", "msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status =", "= '' if link_segment.update_status == constants.NOTHING: count += 1 update_status", "mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_):", "x in self.switches} ########### End three properties are used for", "(len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\"", "self.notification_queues = {x: deque([]) for x in 
self.switches} self.current_notification_time =", "= False for op in link.to_adds_only: if op.seg_path_id == seg_path_id:", "= False for op in link.to_adds_loop: if op.seg_path_id == seg_path_id:", "constants.ON_GOING elif not finished: self.log.debug(\"number of flows that is not", "msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive updated", "self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count =", "for l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw) seg_path_id =", "to segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -=", "count) def check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id):", "\"get from segment %s\" % (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, #", "- 1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos)", "UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING else:", "splittable and final_split_vol > split_vol > 0: final_split_vol = split_vol", "{x: -1 for x in self.switches} ########### End three properties", "def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in", "-= split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id:", "check_all_capable_for_link(self, link, executable_segments_by_link): # 
capable_segments = [] # done_loop =", "self.log.debug(\"number of flows that is not done anything %d\" %", "for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id):", "time.time() * 1000 if len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL", "in link.to_adds_only: if op.seg_path_id == seg_path_id: is_add_only = True break", "compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in link.to_adds_loop:", "current_state == constants.SENT_ADDING: return False return True def is_capable(self, seg_path_id):", "enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self,", "!= link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id) + 1 if next_idx <", "links_by_endpoints, segments_by_seg_path_id) # global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints,", "> 1: for i in range(len(l_segment.old_seg) - 1): # self.log.debug(\"send", "in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING", "l_segment.is_splitting = True for pair in l_segment.new_link_seg: self.log.info(\"avail_cap of link", "== l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0 for l_segment", "TODO: Update remaining_vol_of_loop when adding or removing segment final_split_vol =", "send_to_sames = set() for key in self.to_sames.keys(): to_same = self.to_sames[key]", "defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def reset(self): super(CenCtrlScheduler,", "< current_time: 
global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos): for l_segment", "# pool.waitall() # for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints,", "% (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to compute dependency graph: %s\" %", "update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000", "ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext from misc", "if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send", "= NotificationMessage(0, sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self,", "split_vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair]", "= defaultdict() ########## Begin three properties are used for parallel", "self.switches} ########### End three properties are used for parallel processes", "link_segment.update_status = constants.FINISH_ALL else: # self.log.info(\"receive enough updated msgs for", "self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s segs_by_segpath_id %s\"", "update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self,", "constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased =", "time_start_computing)) def 
process_coherent(self): send_to_sames = set() for key in self.to_sames.keys():", "# endpoints = (link.src, link.dst) # total_vol = 0 #", "self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ##########", "False self.do_segmentation = True def __str__(self): return \"Centralized Controller\" @staticmethod", "return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution = True while has_execution:", "if op.seg_path_id == seg_path_id: return 0 splittable, split_vol = self.check_to_split(link,", "if len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL else: # self.log.info(\"receive", "REMOVED msgs\" % (len(link_segment.old_seg)-1)) self.log.debug(\"received from: %s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status", "> 0 # def check_all_capable_for_link(self, link, executable_segments_by_link): # capable_segments =", "self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s", "% str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self): count = 0", "def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values(): if", "executable_link_by_segments = {} # for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link,", "% (msg.src_id, dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >=", "for op in link.to_adds_only: if op.seg_path_id == seg_path_id: return 0", "{x: -1 for x in self.switches} self.current_processing_time = {x: -1", 
"#update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message in processing update_info:", "anything %d\" % count) def check_and_send_possible_updates(self, update_infos): has_execution = True", "= set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg %s\" % msg) assert", "len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id))", "endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False for", "compute dependency graph: %s\" % str(time() * 1000 - time_start_computing))", "# self.log.info(\"receive enough updated msgs for segment %s\" % str(link_segment.seg_path_id))", "-= l_segment.vol for u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id:", "endpoints %s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to compute", "create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() * 1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows,", "= self.segments_by_seg_path_id[msg.seg_path_id] next_idx = 0 if msg.src_id != link_segment.init_sw: next_idx", "self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def deque_msg_from_notification_queue(self, sw): msg =", "deque([]) for x in self.switches} self.current_notification_time = {x: -1 for", "########### End three properties are used for parallel processes ###########", "link): total_vol = 0 for add_op in link.to_adds + link.to_adds_loop", "# l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING: #", "self.current_processing_time[sw])] -= 1 return msg def has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw,", "for op in 
link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op =", "constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds + link.to_adds_loop:", "time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values():", "self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg", "l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints]", "current_state == constants.NOTHING \\ or current_state == constants.SENT_ADDING: return False", "% str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if queue_len > 0:", "link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if len(link_segment.old_seg) <", "= True for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL:", "update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if len(link_segment.old_seg) < 1:", "psb_uds for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not", "not finished: self.log.debug(\"number of flows that is not done anything", "UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): #", "# \"get from segment %s\" % (l_segment.init_sw, # l_segment.old_seg[0], #", "constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)]", "if not has_no_pending_barrier: return constants.ON_GOING elif not finished: self.log.debug(\"number of", "return related_sws def 
increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self,", "remaining_vol_of_loop when adding or removing segment final_split_vol = 0 l_segment", "for link in links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time", "in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count", "link_segment.update_status == constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s is not", "for op in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if", "pair in l_segment.new_link_seg: self.log.info(\"avail_cap of link %s: %f, \" \"give", "self.segments_by_seg_path_id.values(): if l_segment.update_status == constants.NOTHING: count += 1 self.log.debug(\"number of", "import global_vars import time import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL)", "def are_all_moving_in_ops_finished(self, link): for u_op in link.to_adds + link.to_adds_loop: current_state", "l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0 for l_segment in", "def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict()", "if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id,", "if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle updated msg", "self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for u_op in 
self.links_by_endpoints[pair].to_adds_loop:", "is not done anything %d\" % count) def check_possible_update_by_links(self, update_infos):", "link_segment.update_status == constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg): self.finish_adding_new_path(link_segment,", "link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \\ or", "not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) update_info = update_infos[seg_path_id]", "self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id))", "msgs for segment %s\" % str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos,", "increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg)", "more UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id])", "break is_add_only = False for op in link.to_adds_only: if op.seg_path_id", "if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time", "len(link_segment.old_seg) < 1: link_segment.update_status = constants.FINISH_ALL else: # self.log.info(\"receive enough", "if link_segment.update_status == constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) == \\ len(link_segment.new_seg):", "of link %d->%d: %f, \" # \"get from segment %s\"", "l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1] 
update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw,", "l_segment.update_status = constants.SENT_ADDING for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol", "splittable, split_vol = self.check_to_split(link, l_segment) if splittable and final_split_vol >", "break has_no_pending_barrier = self.has_not_pending_msg() if not has_no_pending_barrier: return constants.ON_GOING elif", "constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs)) for queue_len", "1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status", "next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting =", "process_update_info_func(sw, update_info) self.log.debug(\"add message in processing update_info: %s\" % update_info)", "= update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message in processing update_info: %s\"", "def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if", "segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() # for link in", "in links_by_endpoints.values(): if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for", "False # total_vol += l_segment.vol # # def check_and_send_possible_update_by_link(self, update_infos):", "if l_segment.update_status == constants.NOTHING: # done_loop = False # total_vol", "1: for i in range(len(l_segment.old_seg) - 1): # self.log.debug(\"send to:", "def 
remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo) self.log.debug(\"handle", "self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict()", "link_segment.update_status == constants.FINISH_ADDING: update_status = \"FINISH_ADDING\" elif link_segment.update_status == constants.FINISH_REMOVING:", "= l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if not update_infos.has_key(seg_path_id):", "= update_infos[seg_path_id] update_info.update_nexts[l_segment.init_sw] = UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in", "# self.log.info(\"receive updated msgs for segment %s, new_seg_length = %d\"", "link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] # if l_segment.update_status == constants.NOTHING:", "if link.src == sw: ez_flow_tool.find_dependency_loop_for_link(link, links_by_endpoints, segments_by_seg_path_id) for link in", "properties are used for parallel processes ########## self.no_of_pending_msgs = {}", "%s at %d ms from starting\" % (update_info, (time() -", "removing segment final_split_vol = 0 l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints", "self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() * 1000 if len(link_segment.old_seg) < 1: link_segment.update_status", "%d->%d: %f, \" # \"get from segment %s\" % (l_segment.init_sw,", "and final_split_vol > split_vol > 0: final_split_vol = split_vol self.log.debug(\"capable", "segment %s\" % (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, # str(l_segment.seg_path_id)))", "if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time = time.clock() def execute_all_remove_only_updates(self, update_infos):", "self.switches} 
self.current_processing_time = {x: -1 for x in self.switches} ###########", "%f to segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap", "# def check_all_capable_for_link(self, link, executable_segments_by_link): # capable_segments = [] #", "(not self.are_all_moving_in_ops_finished(link) or link.avail_cap < l_segment.vol)): return False self.log.debug(\"capable %s\"", "# self.log.info(\"time to compute dependency graph: %s\" % str(time() *", "for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self, link):", "update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id)", "# done_loop = True # endpoints = (link.src, link.dst) #", "return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing =", "link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol", "% (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id])", "@staticmethod def init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows,", "in update_infos[key].update_nexts.keys(): if sw not in increased: self.current_notification_time[sw] += 1", "{} # executable_link_by_segments = {} # for link in self.links_by_endpoints.values():", "from segment %s\" % (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap, #", "= time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, 
links_by_endpoints, segments_by_seg_path_id): for link in", "self.log.info(\"avail_cap of link %s: %f, \" \"give %f to segment", "graph: %s\" % str(time() * 1000 - time_start_computing)) def process_coherent(self):", "super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg", "1000 - time_start_computing)) def process_coherent(self): send_to_sames = set() for key", "__str__(self): return \"Centralized Controller\" @staticmethod def init_logger(): return logger.getLogger(\"Centralized Controller\",", "return update_infos def check_finish_update(self): count = 0 finished = True", "segment %s, new_seg_length = %d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if", "defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg %s\" % msg) self.received_removed_msg[msg.seg_path_id].add(msg.src_id) link_segment =", "i in range(len(l_segment.new_seg) - 1): # self.log.debug(\"send to sw%s\" %", "updated msgs for segment %s, new_seg_length = %d\" # %", "= constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func): increased = set() related_sws", "for i in range(len(l_segment.new_seg) - 1): next_sw = l_segment.new_seg[i +", "u_op in self.links_by_endpoints[pair].to_adds_loop: if u_op.seg_path_id == l_segment.seg_path_id: self.remaining_vol_of_dependency_loop_on_link[pair] -= l_segment.vol", "reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg", "%s, new_seg_length = %d\" # % (str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status", "def execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg)", "len(link_segment.new_seg): self.finish_adding_new_path(link_segment, update_infos) 
return update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id]", "== seg_path_id: return 0 splittable, split_vol = self.check_to_split(link, l_segment) if", "for sw in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status", "process_update_info_func): increased = set() related_sws = set([]) for key in", "def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info(\"start finding", "+ 1 if next_idx < len(link_segment.old_seg): dst = link_segment.old_seg[next_idx] pair", "if current_state == constants.NOTHING \\ or current_state == constants.SENT_ADDING: return", "constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment", "that is not done anything %d\" % count) self.scheduling_mode =", "import defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo,", "updated msg %s\" % msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment =", "% (len(link_segment.new_seg)-1)) self.log.debug(\"received from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\"", "in range(len(l_segment.old_seg) - 1): # self.log.debug(\"send to: %s\" % l_segment.old_seg[i])", "= time.time() * 1000 if len(link_segment.old_seg) < 1: link_segment.update_status =", "\\ += self.segments_by_seg_path_id[add_op.seg_path_id].vol def find_dependency_loop_and_sort_updates(self, links_by_endpoints, segments_by_seg_path_id): # pool =", "self.segments_by_seg_path_id.values(): if l_segment.update_status != constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment)", "== constants.SENT_ADDING: return False return True def is_capable(self, seg_path_id): #", 
"update info %s at %d ms from starting\" % (update_info,", "for pair in l_segment.new_link_seg: self.links_by_endpoints[pair].avail_cap -= l_segment.vol for u_op in", "time import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def", "switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict()", "check_and_do_next_update(self, msg): update_infos = defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] =", "for parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False", "% msg) assert self.segments_by_seg_path_id.has_key(msg.seg_path_id), True link_segment = self.segments_by_seg_path_id[msg.seg_path_id] # self.log.info(\"receive", "link_segment.old_seg.index(msg.src_id) + 1 if next_idx < len(link_segment.old_seg): dst = link_segment.old_seg[next_idx]", "= link_segment.old_seg[next_idx] pair = (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol #", "or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst)", "self.no_of_pending_msgs = {} self.notification_queues = {x: deque([]) for x in", "current_mode == constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self):", "link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op = True break is_add_only", "(str(link_segment.seg_path_id), len(link_segment.new_seg))) if link_segment.update_status == constants.SENT_ADDING \\ and len(self.received_updated_msg[msg.seg_path_id]) ==", "l_segment): pass def splittable_vol(self, seg_path_id): # TODO: Update remaining_vol_of_loop when", "# self.links_by_endpoints[pair].avail_cap, # 
str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1:", "# for link in self.links_by_endpoints.values(): # self.check_all_capable_for_link(link, executable_segments_by_link) def total_pending_cycle_vol(self,", "seg_path_id): # TODO: Update remaining_vol_of_loop when adding or removing segment", "self.encounter_deadlock = False self.do_segmentation = True def __str__(self): return \"Centralized", "seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment) if", "executable_segments_by_link) def total_pending_cycle_vol(self, link): total_vol = 0 for add_op in", "self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \"", "l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] =", "in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: update_status = '' if", "= UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING", "!= constants.FINISH_ALL: update_status = '' if link_segment.update_status == constants.NOTHING: count", "- 1): # self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i])) next_sw =", "for link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: update_status =", "for x in self.switches} self.current_processing_time = {x: -1 for x", "update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in old_sws:", "ez_lib import ez_flow_tool from collections import defaultdict from ez_scheduler import", "links_by_endpoints, segments_by_seg_path_id): # pool = 
eventlet.GreenPool() mulog.info(\"start finding dependency loop", "# def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {} # executable_link_by_segments", "global_vars.finish_prioritizing_time = time.clock() def find_dependency_loop_and_sort_updates_by_sw(self, sw, links_by_endpoints, segments_by_seg_path_id): for link", "is not done anything %d\" % count) def check_and_do_next_update(self, msg):", "%s\" % update_info) self.log.debug(\"pending messages: %s\" % str(self.no_of_pending_msgs)) related_sws.add(sw) #self.datapaths[sw", "total_vol = 0 # for op in link.to_adds_loop: # l_segment", "links_by_endpoints, segments_by_seg_path_id): for link in links_by_endpoints.values(): if link.src == sw:", "%s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id))) self.links_by_endpoints[pair].avail_cap -= split_vol for", "update_info = update_infos[key] # self.logger.info(\"Process update info %s at %d", "= 0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message in", "sw, # links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id) # pool.waitall() #", "def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs)) for queue_len in", "def increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def enque_msg_to_notification_queue(self, sw, msg):", "link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must receive %d more REMOVED msgs\" %", "# self.logger.info(\"Process update info %s at %d ms from starting\"", "%s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or self.encounter_deadlock: if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] =", "l_segment.seg_path_id if not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = 
CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair", "# for op in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id] #", "else: current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if current_mode ==", "self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg =", "parallel processes ########### self.to_sames = defaultdict(list) self.encounter_deadlock = False self.do_segmentation", "next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status", "link): self.remaining_vol_of_dependency_loop_on_link[(link.src, link.dst)] = 0 for add_op in link.to_adds_loop: self.remaining_vol_of_dependency_loop_on_link[(link.src,", "= switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg =", "x in self.switches} self.current_processing_time = {x: -1 for x in", "next_idx = 0 if msg.src_id != link_segment.init_sw: next_idx = link_segment.old_seg.index(msg.src_id)", "elif link_segment.update_status == constants.FINISH_REMOVING: update_status = \"FINISH_REMOVING\" self.log.debug(\"segment %s is", "Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing = time.time() *", "1): # self.log.debug(\"send to: %s\" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i", "update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message in processing update_info: %s\" %", "str(link_segment.seg_path_id)) link_segment.update_status = constants.FINISH_ADDING self.release_capacity_send_remove_msg_to_old_segment(update_infos, link_segment) def remove_segment_and_check_to_update(self, msg): assert", "%s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # 
self.log.info(\"time to compute dependency graph: %s\"", "self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING l_segment.is_splitting = True for", "# self.log.info(\"avail_cap of link %d->%d: %f, \" # \"get from", "queue: %s\" % str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if queue_len", "%s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING:", "link.to_adds_only: total_vol += self.segments_by_seg_path_id[add_op.seg_path_id].vol return total_vol def check_to_split(self, link, l_segment):", "%f, \" # \"get from segment %s\" % (msg.src_id, dst,", "l_segment.update_status = constants.SENT_REMOVING else: l_segment.update_status = constants.FINISH_ALL def are_all_moving_in_ops_finished(self, link):", "self.current_notification_time[sw])] = 0 #update_next = update_info.update_nexts[sw] process_update_info_func(sw, update_info) self.log.debug(\"add message", "+ 1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set()", "done_loop = True # endpoints = (link.src, link.dst) # total_vol", "check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {} # executable_link_by_segments = {}", "% self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif link_segment.update_status == constants.SENT_REMOVING: self.log.debug(\"must", "self.segments_by_seg_path_id[u_op.seg_path_id].update_status if current_state == constants.NOTHING \\ or current_state == constants.SENT_ADDING:", "constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1): # self.log.debug(\"send to", "l_segment.vol # self.log.info(\"avail_cap of link %d->%d: %f, \" # \"get", "or (is_add_only and (not self.are_all_moving_in_ops_finished(link) or link.avail_cap < 
l_segment.vol)): return", "return \"Centralized Controller\" @staticmethod def init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL)", "l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0]) self.links_by_endpoints[pair].avail_cap += l_segment.vol #", "defaultdict() ########## Begin three properties are used for parallel processes", "update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw, l_segment.old_seg[0])", "segments_by_seg_path_id): # pool = eventlet.GreenPool() mulog.info(\"start finding dependency loop and", "self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link = {}", "seg_path_id: is_add_only = True break if (not is_dependency_loop_op and (link.avail_cap", "removing segment l_segment = self.segments_by_seg_path_id[seg_path_id] for endpoints in l_segment.new_link_seg: link", "continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment)", "log_): self.switches = switches_ super(CenCtrlScheduler, self).__init__(0, log_) self.remaining_vol_of_dependency_loop_on_link = {}", "# for link in links_by_endpoints.values(): # ez_flow_tool.compute_scheduling_info_for_a_link(link, links_by_endpoints, segments_by_seg_path_id) #", "global_vars import time import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class", "msg): assert isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo) self.log.debug(\"handle removed msg", "0 # for op in link.to_adds_loop: # l_segment = self.segments_by_seg_path_id[op.seg_path_id]", "\"give %f to segment %s\" % (str(pair), self.links_by_endpoints[pair].avail_cap, l_segment.vol, str(l_segment.seg_path_id)))", "for x in self.switches} self.current_notification_time = {x: -1 for x", 
"self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints %s segs_by_segpath_id %s\" %", "update_infos[key].update_nexts.keys(): if sw not in increased: self.current_notification_time[sw] += 1 increased.add(sw)", "%s\" % str(time() * 1000 - time_start_computing)) def process_coherent(self): send_to_sames", "self.segments_by_seg_path_id, self.to_sames, do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints) self.log.debug(self.segments_by_seg_path_id) mulog.info(\"links by endpoints", "dst, # self.links_by_endpoints[pair].avail_cap, # str(link_segment.seg_path_id))) if len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) -", "in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints,", "update_message_queues(self, update_infos, process_update_info_func): increased = set() related_sws = set([]) for", "!= constants.NOTHING: continue seg_path_id = l_segment.seg_path_id self.log.debug(l_segment) split_vol = self.splittable_vol(l_segment.seg_path_id)", "%s\" % l_segment) return final_split_vol def check_and_send_possible_split_updates(self, update_infos): has_execution =", "constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return update_infos def check_finish_update(self):", "seg_path_id = l_segment.seg_path_id self.received_removed_msg[seg_path_id] = set() if l_segment.remove_only: if not", "send_to_sames.add(sw) # for sw in send_to_sames: # msg = NotificationMessage(0,", "updates\") mulog.info(links_by_endpoints) for sw in self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, #", 
"defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def __str__(self): return", "next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_ADDING for pair", "done anything %d\" % count) def check_and_send_possible_updates(self, update_infos): has_execution =", "1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\" % str(link_segment.seg_path_id)) self.check_and_send_possible_updates(update_infos) return", "for add_op in link.to_adds + link.to_adds_loop + link.to_adds_only: total_vol +=", "link.to_adds_only: if op.seg_path_id == seg_path_id: return 0 splittable, split_vol =", "not update_infos.has_key(seg_path_id): update_infos[seg_path_id] = CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) pair = (l_segment.init_sw,", "str(self.no_of_pending_msgs)) for queue_len in self.no_of_pending_msgs.values(): if queue_len > 0: return", "= defaultdict(CenUpdateInfo) if not self.received_updated_msg.has_key(msg.seg_path_id): self.received_updated_msg[msg.seg_path_id] = set() self.received_updated_msg[msg.seg_path_id].add(msg.src_id) self.log.debug(\"handle", "l_segment.vol < self.remaining_vol_of_dependency_loop_on_link[endpoints])) \\ or (is_dependency_loop_op and link.avail_cap < l_segment.vol)\\", "update_status = '' if link_segment.update_status == constants.NOTHING: count += 1", "= split_vol self.log.debug(\"capable %s\" % l_segment) return final_split_vol def check_and_send_possible_split_updates(self,", "for endpoints in l_segment.new_link_seg: link = self.links_by_endpoints[endpoints] is_dependency_loop_op = False", "links_by_endpoints.values(): if link.src == sw: self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if", "return 0 splittable, split_vol = self.check_to_split(link, l_segment) if splittable and", "to: %s\" % l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1] 
update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]]", "= UpdateNext(seg_path_id, next_sw, constants.REMOVE_NEXT) self.received_removed_msg[l_segment.seg_path_id] = set() l_segment.update_status = constants.SENT_REMOVING", "count += 1 update_status = \"NOTHING\" if link_segment.update_status == constants.SENT_ADDING:", "return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode self.scheduling_mode = constants.NORMAL_MODE if", "count) self.scheduling_mode = constants.CONGESTION_MODE return constants.ENCOUNTER_DEADLOCK else: current_mode = self.scheduling_mode", "link, executable_segments_by_link): # capable_segments = [] # done_loop = True", "-= split_vol count = 0 for l_segment in self.segments_by_seg_path_id.values(): if", "mulog.info(\"start finding dependency loop and sort updates\") mulog.info(links_by_endpoints) for sw", "# # def check_and_send_possible_update_by_link(self, update_infos): # executable_segments_by_link = {} #", "for i in range(len(l_segment.old_seg) - 1): # self.log.debug(\"send to: %s\"", "update_status %s.\" % (str(link_segment.seg_path_id), update_status)) # return False finished =", "l_segment.seg_path_id self.log.debug(l_segment) mulog.info(\"chk&send psb_uds for linksegment %s\"%l_segment) if self.is_capable(l_segment.seg_path_id) or", "self.log.debug(\"must receive %d more UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received from:", "check_possible_update_by_links(self, update_infos): has_execution = True while has_execution: has_execution = False", "constants.NOTHING \\ or current_state == constants.SENT_ADDING: return False return True", "constants.CONGESTION_MODE: return constants.FINISHED_WITH_DEADLOCK else: return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue:", "segs_by_segpath_id %s\" % (self.links_by_endpoints,self.segments_by_seg_path_id)) # self.log.info(\"time to compute dependency graph:", "> split_vol > 0: final_split_vol = 
split_vol self.log.debug(\"capable %s\" %", "l_segment.update_status == constants.NOTHING: count += 1 self.log.debug(\"number of flows that", "log_) self.remaining_vol_of_dependency_loop_on_link = {} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict()", "defaultdict from ez_scheduler import EzScheduler from ez_lib.ez_ob import CenUpdateInfo, UpdateNext", "from: %s\" % self.received_updated_msg[link_segment.seg_path_id]) update_status = \"SENT_ADDING\" elif link_segment.update_status ==", "= UpdateNext(seg_path_id, l_segment.new_seg[0], constants.UPDATE_NEXT) for i in range(len(l_segment.new_seg) - 1):", "len(self.received_removed_msg[msg.seg_path_id]) >= len(link_segment.old_seg) - 1: link_segment.update_status = constants.FINISH_ALL self.log.debug(\"finish %s\"", "False self.do_segmentation = True def reset(self): super(CenCtrlScheduler, self).reset() self.remaining_vol_of_dependency_loop_on_link =", "False for op in link.to_adds_loop: if op.seg_path_id == seg_path_id: is_dependency_loop_op", "logger from domain.message import * from collections import deque from", "def enque_msg_to_notification_queue(self, sw, msg): self.notification_queues[sw].append(msg) self.no_of_pending_msgs[(sw, self.current_notification_time[sw])] += 1 def", "queue_len > 0: return False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos,", "constants, logger from domain.message import * from collections import deque", "sw, constants.COHERENT_MSG, 0) # self.send_to_switch(msg, sw) def compute_required_vol_for_dependency_loop(self, link): self.remaining_vol_of_dependency_loop_on_link[(link.src,", "self.to_sames.keys(): to_same = self.to_sames[key] for sw in to_same: send_to_sames.add(sw) #", "eventlet.GreenPool() mulog.info(\"start finding dependency loop and sort updates\") mulog.info(links_by_endpoints) for", "link_segment in self.segments_by_seg_path_id.values(): if link_segment.update_status != constants.FINISH_ALL: 
update_status = ''", "class CenCtrlScheduler(EzScheduler): def __init__(self, switches_, log_): self.switches = switches_ super(CenCtrlScheduler,", "{} self.received_updated_msg = defaultdict() self.received_removed_msg = defaultdict() ########## Begin three", "constants.SENT_ADDING: self.log.debug(\"must receive %d more UPDATED msgs\" % (len(link_segment.new_seg)-1)) self.log.debug(\"received", "self.links_by_endpoints[endpoints] is_dependency_loop_op = False for op in link.to_adds_loop: if op.seg_path_id", "%s\" % self.received_removed_msg[link_segment.seg_path_id]) update_status = \"SENT REMOVING\" elif link_segment.update_status ==", "\"get from segment %s\" % (l_segment.init_sw, # l_segment.old_seg[0], # self.links_by_endpoints[pair].avail_cap,", "UpdateNext(l_segment.seg_path_id, sw, constants.REMOVE_NEXT) l_segment.update_status = constants.SENT_REMOVING def update_message_queues(self, update_infos, process_update_info_func):", "self.do_segmentation = True def __str__(self): return \"Centralized Controller\" @staticmethod def", "CenUpdateInfo(seg_path_id, l_segment.flow_src, l_segment.flow_dst) for sw in old_sws: update_infos[seg_path_id].update_nexts[sw] = UpdateNext(l_segment.seg_path_id,", "execute_all_remove_only_updates(self, update_infos): for l_segment in self.segments_by_seg_path_id.values(): old_sws = set(l_segment.old_seg) old_sws.add(l_segment.init_sw)", "1] update_info.update_nexts[l_segment.new_seg[i]] = UpdateNext(seg_path_id, next_sw, constants.ADD_NEXT) self.received_updated_msg[l_segment.seg_path_id] = set() l_segment.update_status", "is_capable(self, seg_path_id): # TODO: Update remaining_vol_of_loop when adding or removing", "= defaultdict(list) self.encounter_deadlock = False self.do_segmentation = True def reset(self):", "in send_to_sames: # msg = NotificationMessage(0, sw, constants.COHERENT_MSG, 0) #", "count = 0 finished = True for link_segment in self.segments_by_seg_path_id.values():", "(update_info, (time() - 
self.current_start_time)*1000)) assert update_info, CenUpdateInfo for sw in", "self.remaining_vol_of_dependency_loop_on_link[pair] -= split_vol count = 0 for l_segment in self.segments_by_seg_path_id.values():", "i in range(len(l_segment.old_seg) - 1): # self.log.debug(\"send to: %s\" %", "% l_segment.old_seg[i]) next_sw = l_segment.old_seg[i + 1] update_infos[seg_path_id].update_nexts[l_segment.old_seg[i]] = UpdateNext(seg_path_id,", "UpdateNext from misc import constants, logger from domain.message import *", "import eventlet mulog = logger.getLogger('cen_scheduler', constants.LOG_LEVEL) class CenCtrlScheduler(EzScheduler): def __init__(self,", "1]) return related_sws def increase_processing_time(self, sw): self.current_processing_time[sw] += 1 def", "# self.log.debug(\"send to sw%s\" % str(l_segment.new_seg[i])) next_sw = l_segment.new_seg[i +", "self.compute_required_vol_for_dependency_loop(link) current_time = time.clock() if global_vars.finish_computation_time < current_time: global_vars.finish_computation_time =", "= False self.do_segmentation = True def __str__(self): return \"Centralized Controller\"", "remove_segment_and_check_to_update(self, msg): assert isinstance(msg, NotificationMessage) update_infos = defaultdict(CenUpdateInfo) self.log.debug(\"handle removed", "= (msg.src_id, dst) self.links_by_endpoints[pair].avail_cap += link_segment.vol # self.log.info(\"avail_cap of link", "init_logger(): return logger.getLogger(\"Centralized Controller\", constants.LOG_LEVEL) def create_dependency_graph(self, old_flows, new_flows): time_start_computing", "return constants.FINISHED_WITHOUT_DEADLOCK def has_not_pending_msg(self): self.log.debug(\"pending queue: %s\" % str(self.no_of_pending_msgs)) for", "> 0: return False return True def release_capacity_send_remove_msg_to_old_segment(self, update_infos, l_segment):", "1000 ez_flow_tool.create_dependency_graph(old_flows, new_flows, self.links_by_endpoints, self.segments_by_seg_path_id, self.to_sames, 
do_segmentation=self.do_segmentation) self.find_dependency_loop_and_sort_updates(self.links_by_endpoints, self.segments_by_seg_path_id) self.log.debug(self.links_by_endpoints)", "return update_infos def finish_adding_new_path(self, link_segment, update_infos): self.trace.time_using_new_path_by_seg_path_id[link_segment.seg_path_id] = time.time() *", "endpoints = (link.src, link.dst) # total_vol = 0 # for", "link): for u_op in link.to_adds + link.to_adds_loop: current_state = self.segments_by_seg_path_id[u_op.seg_path_id].update_status", "########## Begin three properties are used for parallel processes ##########", "self.switches: # pool.spawn_n(self.find_dependency_loop_and_sort_updates_by_sw, sw, # links_by_endpoints, segments_by_seg_path_id) self.find_dependency_loop_and_sort_updates_by_sw(sw, links_by_endpoints, segments_by_seg_path_id)", "== constants.SENT_REMOVING: self.log.debug(\"must receive %d more REMOVED msgs\" % (len(link_segment.old_seg)-1))", "has_pending_msg_of_sw(self, sw): return self.no_of_pending_msgs[(sw, self.current_processing_time[sw])] > 0 # def check_all_capable_for_link(self," ]
[ "255, nothing) cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B', 'image', 0,", "cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B', 'image', 0, 255, nothing)", "cv def nothing(x): pass # Create a black image, a", "이미지 표시 cv.imshow('image', img) if cv.waitKey(10) > 0: break cv.destroyAllWindows()", "0, 1, nothing) while True: # get current positions of", "'image', 0, 1, nothing) while True: # get current positions", "'image', 0, 255, nothing) cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B',", "a black image, a window img = np.zeros((300, 512, 3),", "흑백, 켜져 있으면 색상 if s == 0: img[:] =", "np import cv2 as cv def nothing(x): pass # Create", "# create switch for ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch,", "스위치가 꺼져 있으면 흑백, 켜져 있으면 색상 if s ==", "create switch for ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image',", "= cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져 있으면 흑백, 켜져 있으면", "def nothing(x): pass # Create a black image, a window", "of four trackbars r = cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G',", "'image', 0, 255, nothing) cv.createTrackbar('B', 'image', 0, 255, nothing) #", "img[:] = 0 else: img[:] = [b, g, r] #", "s == 0: img[:] = 0 else: img[:] = [b,", "functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing) while", "색상 if s == 0: img[:] = 0 else: img[:]", "nothing) cv.createTrackbar('B', 'image', 0, 255, nothing) # create switch for", "import cv2 as cv def nothing(x): pass # Create a", "black image, a window img = np.zeros((300, 512, 3), np.uint8)", "np.uint8) cv.namedWindow('image') # create trackbars for color change cv.createTrackbar('R', 'image',", "512, 3), np.uint8) cv.namedWindow('image') # create trackbars for color change", "# 이미지 표시 cv.imshow('image', img) if cv.waitKey(10) > 0: break", "cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져 있으면", "= cv.getTrackbarPos('R', 
'image') g = cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B',", "'image') s = cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져 있으면 흑백,", "[b, g, r] # 이미지 표시 cv.imshow('image', img) if cv.waitKey(10)", "켜져 있으면 색상 if s == 0: img[:] = 0", "0, 255, nothing) cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B', 'image',", "cv2 as cv def nothing(x): pass # Create a black", "trackbars for color change cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G',", "if s == 0: img[:] = 0 else: img[:] =", "1, nothing) while True: # get current positions of four", "as np import cv2 as cv def nothing(x): pass #", "cv.createTrackbar(switch, 'image', 0, 1, nothing) while True: # get current", "# get current positions of four trackbars r = cv.getTrackbarPos('R',", "image, a window img = np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image')", "s = cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져 있으면 흑백, 켜져", "= cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져", "cv.getTrackbarPos(switch, 'image') # 스위치가 꺼져 있으면 흑백, 켜져 있으면 색상", "switch for ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0,", "pass # Create a black image, a window img =", "255, nothing) # create switch for ON/OFF functionality switch =", "g = cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image') s =", "window img = np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') # create", "r] # 이미지 표시 cv.imshow('image', img) if cv.waitKey(10) > 0:", "'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing) while True: # get", "a window img = np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') #", "cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G', 'image', 0, 255, nothing)", "있으면 색상 if s == 0: img[:] = 0 else:", "color change cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G', 'image', 0,", "== 0: img[:] = 0 else: img[:] = [b, g,", "import numpy as np import cv2 as cv 
def nothing(x):", "for ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1,", "as cv def nothing(x): pass # Create a black image,", "positions of four trackbars r = cv.getTrackbarPos('R', 'image') g =", "0 else: img[:] = [b, g, r] # 이미지 표시", "True: # get current positions of four trackbars r =", "0: img[:] = 0 else: img[:] = [b, g, r]", "img = np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') # create trackbars", "0, 255, nothing) # create switch for ON/OFF functionality switch", "change cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G', 'image', 0, 255,", "# create trackbars for color change cv.createTrackbar('R', 'image', 0, 255,", "3), np.uint8) cv.namedWindow('image') # create trackbars for color change cv.createTrackbar('R',", "nothing) # create switch for ON/OFF functionality switch = 'OFF/ON'", "0, 255, nothing) cv.createTrackbar('B', 'image', 0, 255, nothing) # create", "= [b, g, r] # 이미지 표시 cv.imshow('image', img) if", "b = cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image') # 스위치가", "np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') # create trackbars for color", "four trackbars r = cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G', 'image')", "current positions of four trackbars r = cv.getTrackbarPos('R', 'image') g", "switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing) while True:", "# 스위치가 꺼져 있으면 흑백, 켜져 있으면 색상 if s", "ON/OFF functionality switch = 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing)", "'image') g = cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image') s", "nothing(x): pass # Create a black image, a window img", "꺼져 있으면 흑백, 켜져 있으면 색상 if s == 0:", "'image') b = cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image') #", "= cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch,", "else: img[:] = [b, g, r] # 이미지 표시 cv.imshow('image',", "g, r] # 이미지 표시 
cv.imshow('image', img) if cv.waitKey(10) >", "while True: # get current positions of four trackbars r", "cv.namedWindow('image') # create trackbars for color change cv.createTrackbar('R', 'image', 0,", "'image', 0, 255, nothing) # create switch for ON/OFF functionality", "Create a black image, a window img = np.zeros((300, 512,", "있으면 흑백, 켜져 있으면 색상 if s == 0: img[:]", "<reponame>clovadev/opencv-python import numpy as np import cv2 as cv def", "img[:] = [b, g, r] # 이미지 표시 cv.imshow('image', img)", "nothing) cv.createTrackbar('G', 'image', 0, 255, nothing) cv.createTrackbar('B', 'image', 0, 255,", "numpy as np import cv2 as cv def nothing(x): pass", "trackbars r = cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G', 'image') b", "= np.zeros((300, 512, 3), np.uint8) cv.namedWindow('image') # create trackbars for", "create trackbars for color change cv.createTrackbar('R', 'image', 0, 255, nothing)", "for color change cv.createTrackbar('R', 'image', 0, 255, nothing) cv.createTrackbar('G', 'image',", "cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image')", "# Create a black image, a window img = np.zeros((300,", "cv.getTrackbarPos('G', 'image') b = cv.getTrackbarPos('B', 'image') s = cv.getTrackbarPos(switch, 'image')", "nothing) while True: # get current positions of four trackbars", "= 0 else: img[:] = [b, g, r] # 이미지", "255, nothing) cv.createTrackbar('B', 'image', 0, 255, nothing) # create switch", "cv.createTrackbar('B', 'image', 0, 255, nothing) # create switch for ON/OFF", "= 'OFF/ON' cv.createTrackbar(switch, 'image', 0, 1, nothing) while True: #", "r = cv.getTrackbarPos('R', 'image') g = cv.getTrackbarPos('G', 'image') b =", "get current positions of four trackbars r = cv.getTrackbarPos('R', 'image')", "'image') # 스위치가 꺼져 있으면 흑백, 켜져 있으면 색상 if" ]
[ "if pt2 == 0: working = elves.difference(complete) for elf in", "range(1, int(n**0.5)+1, step) if not n % i))) def solve(target):", "functools import reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2", "= sum(elves)*10 if gifts1 >= target: pt1 = house_count if", "= sum(working)*11 if gifts2 >= target: pt2 = house_count return", "around 20s pt1, pt2 = solve(29000000) print(\"Part 1:\", pt1) print(\"Part", "# https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2 if n%2 else", "= 0 elves = factors(house_count) if pt1 == 0: gifts1", "= 0 deliveries = {} complete = set() pt1 =", "if n%2 else 1 return set(reduce(list.__add__, ([i, n//i] for i", "pt1 == 0: gifts1 = sum(elves)*10 if gifts1 >= target:", "sum(elves)*10 if gifts1 >= target: pt1 = house_count if pt2", "return set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5)+1, step)", "pt2 = house_count return pt1, pt2 # takes around 20s", "gifts2 >= target: pt2 = house_count return pt1, pt2 #", "= 2 if n%2 else 1 return set(reduce(list.__add__, ([i, n//i]", "step = 2 if n%2 else 1 return set(reduce(list.__add__, ([i,", "def solve(target): house_count = 0 deliveries = {} complete =", "set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5)+1, step) if", "pt1, pt2 = solve(29000000) print(\"Part 1:\", pt1) print(\"Part 2:\", pt2)", "house_count if pt2 == 0: working = elves.difference(complete) for elf", "pt2 # takes around 20s pt1, pt2 = solve(29000000) print(\"Part", "complete.add(elf) else: deliveries[elf] = 1 gifts2 = sum(working)*11 if gifts2", "0: working = elves.difference(complete) for elf in working: if elf", "+= 1 gifts1 = 0 gifts2 = 0 elves =", "i in range(1, int(n**0.5)+1, step) if not n % i)))", "pt1, pt2 # takes around 20s pt1, pt2 = solve(29000000)", "else: deliveries[elf] = 1 gifts2 = 
sum(working)*11 if gifts2 >=", "in deliveries: deliveries[elf] += 1 if deliveries[elf] == 50: complete.add(elf)", "50: complete.add(elf) else: deliveries[elf] = 1 gifts2 = sum(working)*11 if", "if gifts1 >= target: pt1 = house_count if pt2 ==", "= 0 pt2 = 0 while pt1 == 0 or", "house_count = 0 deliveries = {} complete = set() pt1", "1 return set(reduce(list.__add__, ([i, n//i] for i in range(1, int(n**0.5)+1,", "for i in range(1, int(n**0.5)+1, step) if not n %", "== 0: house_count += 1 gifts1 = 0 gifts2 =", "set() pt1 = 0 pt2 = 0 while pt1 ==", "def factors(n): step = 2 if n%2 else 1 return", "i))) def solve(target): house_count = 0 deliveries = {} complete", "deliveries[elf] += 1 if deliveries[elf] == 50: complete.add(elf) else: deliveries[elf]", "takes around 20s pt1, pt2 = solve(29000000) print(\"Part 1:\", pt1)", "0 pt2 = 0 while pt1 == 0 or pt2", "target: pt1 = house_count if pt2 == 0: working =", "1 if deliveries[elf] == 50: complete.add(elf) else: deliveries[elf] = 1", "if elf in deliveries: deliveries[elf] += 1 if deliveries[elf] ==", "= set() pt1 = 0 pt2 = 0 while pt1", "= 1 gifts2 = sum(working)*11 if gifts2 >= target: pt2", "for elf in working: if elf in deliveries: deliveries[elf] +=", "int(n**0.5)+1, step) if not n % i))) def solve(target): house_count", "0 or pt2 == 0: house_count += 1 gifts1 =", "from functools import reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step =", "elf in deliveries: deliveries[elf] += 1 if deliveries[elf] == 50:", ">= target: pt2 = house_count return pt1, pt2 # takes", "n//i] for i in range(1, int(n**0.5)+1, step) if not n", "return pt1, pt2 # takes around 20s pt1, pt2 =", "== 0: gifts1 = sum(elves)*10 if gifts1 >= target: pt1", "https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2 if n%2 else 1", "{} complete = set() pt1 = 0 
pt2 = 0", "elves.difference(complete) for elf in working: if elf in deliveries: deliveries[elf]", "0 while pt1 == 0 or pt2 == 0: house_count", "0 deliveries = {} complete = set() pt1 = 0", "working = elves.difference(complete) for elf in working: if elf in", "2 if n%2 else 1 return set(reduce(list.__add__, ([i, n//i] for", "if not n % i))) def solve(target): house_count = 0", "+= 1 if deliveries[elf] == 50: complete.add(elf) else: deliveries[elf] =", "1 gifts2 = sum(working)*11 if gifts2 >= target: pt2 =", "gifts2 = sum(working)*11 if gifts2 >= target: pt2 = house_count", "factors(n): step = 2 if n%2 else 1 return set(reduce(list.__add__,", "# takes around 20s pt1, pt2 = solve(29000000) print(\"Part 1:\",", "gifts1 = sum(elves)*10 if gifts1 >= target: pt1 = house_count", "in range(1, int(n**0.5)+1, step) if not n % i))) def", "import reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2 if", "factors(house_count) if pt1 == 0: gifts1 = sum(elves)*10 if gifts1", "or pt2 == 0: house_count += 1 gifts1 = 0", "0 gifts2 = 0 elves = factors(house_count) if pt1 ==", "reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step = 2 if n%2", "([i, n//i] for i in range(1, int(n**0.5)+1, step) if not", "pt1 == 0 or pt2 == 0: house_count += 1", "gifts1 = 0 gifts2 = 0 elves = factors(house_count) if", "0 elves = factors(house_count) if pt1 == 0: gifts1 =", "== 0: working = elves.difference(complete) for elf in working: if", "pt1 = house_count if pt2 == 0: working = elves.difference(complete)", "deliveries[elf] == 50: complete.add(elf) else: deliveries[elf] = 1 gifts2 =", "== 50: complete.add(elf) else: deliveries[elf] = 1 gifts2 = sum(working)*11", "pt2 = 0 while pt1 == 0 or pt2 ==", "== 0 or pt2 == 0: house_count += 1 gifts1", "sum(working)*11 if gifts2 >= target: pt2 = house_count 
return pt1,", "deliveries: deliveries[elf] += 1 if deliveries[elf] == 50: complete.add(elf) else:", "deliveries[elf] = 1 gifts2 = sum(working)*11 if gifts2 >= target:", "else 1 return set(reduce(list.__add__, ([i, n//i] for i in range(1,", "complete = set() pt1 = 0 pt2 = 0 while", "n%2 else 1 return set(reduce(list.__add__, ([i, n//i] for i in", "not n % i))) def solve(target): house_count = 0 deliveries", "pt2 == 0: working = elves.difference(complete) for elf in working:", "= house_count if pt2 == 0: working = elves.difference(complete) for", "step) if not n % i))) def solve(target): house_count =", "solve(target): house_count = 0 deliveries = {} complete = set()", "= 0 while pt1 == 0 or pt2 == 0:", "gifts1 >= target: pt1 = house_count if pt2 == 0:", "n % i))) def solve(target): house_count = 0 deliveries =", "pt1 = 0 pt2 = 0 while pt1 == 0", "pt2 == 0: house_count += 1 gifts1 = 0 gifts2", "target: pt2 = house_count return pt1, pt2 # takes around", "while pt1 == 0 or pt2 == 0: house_count +=", "working: if elf in deliveries: deliveries[elf] += 1 if deliveries[elf]", "20s pt1, pt2 = solve(29000000) print(\"Part 1:\", pt1) print(\"Part 2:\",", "= elves.difference(complete) for elf in working: if elf in deliveries:", "house_count return pt1, pt2 # takes around 20s pt1, pt2", "= {} complete = set() pt1 = 0 pt2 =", "elf in working: if elf in deliveries: deliveries[elf] += 1", "elves = factors(house_count) if pt1 == 0: gifts1 = sum(elves)*10", "<reponame>ambertests/adventofcode from functools import reduce # https://stackoverflow.com/questions/6800193/what-is-the-most-efficient-way-of-finding-all-the-factors-of-a-number-in-python def factors(n): step", "if pt1 == 0: gifts1 = sum(elves)*10 if gifts1 >=", "house_count += 1 gifts1 = 0 gifts2 = 0 elves", ">= target: pt1 = house_count if pt2 == 0: working", "if deliveries[elf] == 50: complete.add(elf) else: deliveries[elf] = 1 gifts2", "= house_count return pt1, pt2 # takes around 20s pt1,", "if gifts2 >= target: pt2 = 
house_count return pt1, pt2", "gifts2 = 0 elves = factors(house_count) if pt1 == 0:", "= factors(house_count) if pt1 == 0: gifts1 = sum(elves)*10 if", "0: gifts1 = sum(elves)*10 if gifts1 >= target: pt1 =", "in working: if elf in deliveries: deliveries[elf] += 1 if", "% i))) def solve(target): house_count = 0 deliveries = {}", "deliveries = {} complete = set() pt1 = 0 pt2", "1 gifts1 = 0 gifts2 = 0 elves = factors(house_count)", "0: house_count += 1 gifts1 = 0 gifts2 = 0", "= 0 gifts2 = 0 elves = factors(house_count) if pt1" ]
[ ":: Python :: 3.7\", \"Programming Language :: Python :: Implementation", "app for managing GitHub labels for Python 3.6 and newer.", "pathlib import setuptools def read(*args: str) -> str: file_path =", "project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for managing", "3 - Alpha\", \"Intended Audience :: Developers\", \"License :: OSI", ":: OS Independent\", \"Programming Language :: Python :: 3 ::", "maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", },", "= pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\",", "\"Programming Language :: Python :: 3.7\", \"Programming Language :: Python", "def read(*args: str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\")", "Language :: Python :: 3.6\", \"Programming Language :: Python ::", "entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[ \"Development Status :: 3 -", "\"Development Status :: 3 - Alpha\", \"Intended Audience :: Developers\",", "str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\",", "file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\",", "\"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for managing GitHub", "\"Programming Language :: Python 
:: 3.6\", \"Programming Language :: Python", "import setuptools def read(*args: str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args)", "GitHub labels for Python 3.6 and newer. 📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\",", "Status :: 3 - Alpha\", \"Intended Audience :: Developers\", \"License", "}, description=\"CLI app for managing GitHub labels for Python 3.6", "English\", \"Operating System :: OS Independent\", \"Programming Language :: Python", "author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\":", "Language :: Python :: 3 :: Only\", \"Programming Language ::", ":: 3.7\", \"Programming Language :: Python :: Implementation :: CPython\",", "\"Programming Language :: Python :: 3 :: Only\", \"Programming Language", "Alpha\", \"Intended Audience :: Developers\", \"License :: OSI Approved ::", "\"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for managing GitHub labels for Python", "System :: OS Independent\", \"Programming Language :: Python :: 3", ":: Developers\", \"License :: OSI Approved :: MIT License\", \"Natural", "Implementation :: CPython\", \"Topic :: Utilities\", ], keywords=[\"github\", \"command-line\"], )", "license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app", ":: 3 :: Only\", \"Programming Language :: Python :: 3.6\",", "OS Independent\", \"Programming Language :: Python :: 3 :: Only\",", ":: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language", "\"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[ \"Development Status ::", "Approved :: 
MIT License\", \"Natural Language :: English\", \"Operating System", "Python 3.6 and newer. 📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"},", "\"Intended Audience :: Developers\", \"License :: OSI Approved :: MIT", "Language :: Python :: 3.7\", \"Programming Language :: Python ::", "\"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels", "OSI Approved :: MIT License\", \"Natural Language :: English\", \"Operating", "long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\",", "setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={", "zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]},", "= labels.cli:labels\"]}, classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended", "\"Programming Language :: Python :: Implementation :: CPython\", \"Topic ::", "\"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for managing GitHub labels for", "return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\",", "python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], 
entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[", "\"Natural Language :: English\", \"Operating System :: OS Independent\", \"Programming", "License\", \"Natural Language :: English\", \"Operating System :: OS Independent\",", ":: MIT License\", \"Natural Language :: English\", \"Operating System ::", "include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels =", ":: Python :: 3.6\", \"Programming Language :: Python :: 3.7\",", "file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\",", "\"License :: OSI Approved :: MIT License\", \"Natural Language ::", "-> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\",", "3 :: Only\", \"Programming Language :: Python :: 3.6\", \"Programming", ":: Python :: Implementation :: CPython\", \"Topic :: Utilities\", ],", "classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience ::", "import pathlib import setuptools def read(*args: str) -> str: file_path", "\"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[ \"Development Status", "3.7\", \"Programming Language :: Python :: Implementation :: CPython\", \"Topic", "package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\":", "and newer. 
📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False,", "- Alpha\", \"Intended Audience :: Developers\", \"License :: OSI Approved", "labels.cli:labels\"]}, classifiers=[ \"Development Status :: 3 - Alpha\", \"Intended Audience", "Python :: 3.6\", \"Programming Language :: Python :: 3.7\", \"Programming", "newer. 📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\",", "packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"],", "str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\",", "Language :: Python :: Implementation :: CPython\", \"Topic :: Utilities\",", ":: Implementation :: CPython\", \"Topic :: Utilities\", ], keywords=[\"github\", \"command-line\"],", "description=\"CLI app for managing GitHub labels for Python 3.6 and", ":: Python :: 3 :: Only\", \"Programming Language :: Python", "read(*args: str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup(", "\"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for managing GitHub labels", "name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\":", "Audience :: Developers\", \"License :: OSI 
Approved :: MIT License\",", "version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\",", "3.6 and newer. 📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True,", "\"attrs\"], entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[ \"Development Status :: 3", ":: 3 - Alpha\", \"Intended Audience :: Developers\", \"License ::", "author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\",", "MIT License\", \"Natural Language :: English\", \"Operating System :: OS", "setuptools def read(*args: str) -> str: file_path = pathlib.Path(__file__).parent.joinpath(*args) return", "<reponame>jean/labels<gh_stars>1-10 import pathlib import setuptools def read(*args: str) -> str:", "📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\",", "Python :: 3.7\", \"Programming Language :: Python :: Implementation ::", "Independent\", \"Programming Language :: Python :: 3 :: Only\", \"Programming", "3.6\", \"Programming Language :: Python :: 3.7\", \"Programming Language ::", "for Python 3.6 and newer. 
📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\":", "[\"labels = labels.cli:labels\"]}, classifiers=[ \"Development Status :: 3 - Alpha\",", ":: OSI Approved :: MIT License\", \"Natural Language :: English\",", "install_requires=[\"click\", \"requests\", \"pytoml\", \"attrs\"], entry_points={\"console_scripts\": [\"labels = labels.cli:labels\"]}, classifiers=[ \"Development", "maintainer_email=\"<EMAIL>\", license=\"MIT\", url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI", "url=\"https://github.com/hackebrot/labels\", project_urls={ \"Repository\": \"https://github.com/hackebrot/labels\", \"Issues\": \"https://github.com/hackebrot/labels/issues\", }, description=\"CLI app for", ":: Only\", \"Programming Language :: Python :: 3.6\", \"Programming Language", "Python :: 3 :: Only\", \"Programming Language :: Python ::", "managing GitHub labels for Python 3.6 and newer. 📝\", long_description=read(\"README.md\"),", "pathlib.Path(__file__).parent.joinpath(*args) return file_path.read_text(\"utf-8\") setuptools.setup( name=\"labels\", version=\"0.3.0.dev0\", author=\"<NAME>\", author_email=\"<EMAIL>\", maintainer=\"<NAME>\", maintainer_email=\"<EMAIL>\",", "Python :: Implementation :: CPython\", \"Topic :: Utilities\", ], keywords=[\"github\",", "labels for Python 3.6 and newer. 
📝\", long_description=read(\"README.md\"), long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"),", "long_description_content_type=\"text/markdown\", packages=setuptools.find_packages(\"src\"), package_dir={\"\": \"src\"}, include_package_data=True, zip_safe=False, python_requires=\">=3.6\", install_requires=[\"click\", \"requests\", \"pytoml\",", "\"Operating System :: OS Independent\", \"Programming Language :: Python ::", "for managing GitHub labels for Python 3.6 and newer. 📝\",", "Developers\", \"License :: OSI Approved :: MIT License\", \"Natural Language", ":: English\", \"Operating System :: OS Independent\", \"Programming Language ::", "Language :: English\", \"Operating System :: OS Independent\", \"Programming Language", "Only\", \"Programming Language :: Python :: 3.6\", \"Programming Language ::" ]
[ "swaps, locations and user-to-user messaging. In 0.5 this was called", "networking site. It provides profiles, friends, photos, blogs, tribes, wikis,", "It provides profiles, friends, photos, blogs, tribes, wikis, tweets, bookmarks,", "-*- coding: utf-8 -*- __about__ = \"\"\" This project demonstrates", "__about__ = \"\"\" This project demonstrates a social networking site.", "= \"\"\" This project demonstrates a social networking site. It", "tweets, bookmarks, swaps, locations and user-to-user messaging. In 0.5 this", "profiles, friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps, locations", "<gh_stars>1-10 # -*- coding: utf-8 -*- __about__ = \"\"\" This", "and user-to-user messaging. In 0.5 this was called \"complete_project\". \"\"\"", "coding: utf-8 -*- __about__ = \"\"\" This project demonstrates a", "demonstrates a social networking site. It provides profiles, friends, photos,", "# -*- coding: utf-8 -*- __about__ = \"\"\" This project", "bookmarks, swaps, locations and user-to-user messaging. In 0.5 this was", "project demonstrates a social networking site. It provides profiles, friends,", "a social networking site. It provides profiles, friends, photos, blogs,", "blogs, tribes, wikis, tweets, bookmarks, swaps, locations and user-to-user messaging.", "site. It provides profiles, friends, photos, blogs, tribes, wikis, tweets,", "wikis, tweets, bookmarks, swaps, locations and user-to-user messaging. In 0.5", "This project demonstrates a social networking site. It provides profiles,", "locations and user-to-user messaging. In 0.5 this was called \"complete_project\".", "social networking site. It provides profiles, friends, photos, blogs, tribes,", "tribes, wikis, tweets, bookmarks, swaps, locations and user-to-user messaging. In", "\"\"\" This project demonstrates a social networking site. 
It provides", "photos, blogs, tribes, wikis, tweets, bookmarks, swaps, locations and user-to-user", "utf-8 -*- __about__ = \"\"\" This project demonstrates a social", "friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps, locations and", "-*- __about__ = \"\"\" This project demonstrates a social networking", "provides profiles, friends, photos, blogs, tribes, wikis, tweets, bookmarks, swaps," ]
[ ") ) def all_roles(): yield '', '---------' for r in", "'---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, \"%s[%s] %s\"", "VentureRole def all_ventures(): yield '', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'):", "yield '', '---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name',", "from __future__ import absolute_import from __future__ import division from __future__", "coding: utf-8 -*- from __future__ import absolute_import from __future__ import", "import Venture, VentureRole def all_ventures(): yield '', '---------' for v", "unicode_literals from ralph.business.models import Venture, VentureRole def all_ventures(): yield '',", "all_roles(): yield '', '---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name',", "4 * v.path.count('/'), # u00A0 == 'no-break space' v.symbol, v.name,", "space' v.symbol, v.name, ) ) def all_roles(): yield '', '---------'", "v.id, \"%s[%s] %s\" % ( '\\u00A0' * 4 * v.path.count('/'),", "from __future__ import division from __future__ import print_function from __future__", "ralph.business.models import Venture, VentureRole def all_ventures(): yield '', '---------' for", "\"%s[%s] %s\" % ( '\\u00A0' * 4 * v.path.count('/'), #", "absolute_import from __future__ import division from __future__ import print_function from", "__future__ import division from __future__ import print_function from __future__ import", "def all_roles(): yield '', '---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure',", "* 4 * v.path.count('/'), # u00A0 == 'no-break space' v.symbol,", "-*- from __future__ import absolute_import from __future__ import division from", "u00A0 == 'no-break space' v.symbol, v.name, ) ) def all_roles():", "'parent__parent__name', 'parent__name', 'name' ): yield r.id, '{} / {}'.format(r.venture.name, r.full_name)", "# u00A0 
== 'no-break space' v.symbol, v.name, ) ) def", "* v.path.count('/'), # u00A0 == 'no-break space' v.symbol, v.name, )", "% ( '\\u00A0' * 4 * v.path.count('/'), # u00A0 ==", "Venture, VentureRole def all_ventures(): yield '', '---------' for v in", "'', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, \"%s[%s]", ") def all_roles(): yield '', '---------' for r in VentureRole.objects.order_by(", "yield '', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id,", "import print_function from __future__ import unicode_literals from ralph.business.models import Venture,", "yield ( v.id, \"%s[%s] %s\" % ( '\\u00A0' * 4", "r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield", "__future__ import print_function from __future__ import unicode_literals from ralph.business.models import", "import unicode_literals from ralph.business.models import Venture, VentureRole def all_ventures(): yield", "v.path.count('/'), # u00A0 == 'no-break space' v.symbol, v.name, ) )", "in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield r.id,", "%s\" % ( '\\u00A0' * 4 * v.path.count('/'), # u00A0", "in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, \"%s[%s] %s\" % ( '\\u00A0'", "'---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name'", "-*- coding: utf-8 -*- from __future__ import absolute_import from __future__", "'', '---------' for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name',", "v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, \"%s[%s] %s\" % (", "utf-8 -*- from __future__ import absolute_import from __future__ import 
division", "v.name, ) ) def all_roles(): yield '', '---------' for r", "'-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield r.id, '{} /", "def all_ventures(): yield '', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield", "VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield r.id, '{}", "from __future__ import print_function from __future__ import unicode_literals from ralph.business.models", "# -*- coding: utf-8 -*- from __future__ import absolute_import from", "( '\\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break", "( v.id, \"%s[%s] %s\" % ( '\\u00A0' * 4 *", "'venture__name', 'parent__parent__name', 'parent__name', 'name' ): yield r.id, '{} / {}'.format(r.venture.name,", "for r in VentureRole.objects.order_by( '-venture__is_infrastructure', 'venture__name', 'parent__parent__name', 'parent__name', 'name' ):", "from __future__ import unicode_literals from ralph.business.models import Venture, VentureRole def", "from ralph.business.models import Venture, VentureRole def all_ventures(): yield '', '---------'", "import division from __future__ import print_function from __future__ import unicode_literals", "Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, \"%s[%s] %s\" % ( '\\u00A0' *", "'\\u00A0' * 4 * v.path.count('/'), # u00A0 == 'no-break space'", "v.symbol, v.name, ) ) def all_roles(): yield '', '---------' for", "== 'no-break space' v.symbol, v.name, ) ) def all_roles(): yield", "import absolute_import from __future__ import division from __future__ import print_function", "__future__ import unicode_literals from ralph.business.models import Venture, VentureRole def all_ventures():", "__future__ import absolute_import from __future__ import division from __future__ import", "for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield ( v.id, 
\"%s[%s] %s\" %", "all_ventures(): yield '', '---------' for v in Venture.objects.filter(show_in_ralph=True).order_by('path'): yield (", "division from __future__ import print_function from __future__ import unicode_literals from", "'no-break space' v.symbol, v.name, ) ) def all_roles(): yield '',", "print_function from __future__ import unicode_literals from ralph.business.models import Venture, VentureRole" ]
[]
[ "your ' 'memories in ways you cannot imagine.') user4.seller_detail =", "user4.email = \"<EMAIL>\" user4.city_country = \"Singapore, SG\" user4.tags = \"Photography\"", "\"Stylist\" user2.about = '''Reimagine your looks with us''' user2.seller_detail =", "= '''We are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username", "\"Singapore, SG\" user5.tags = \"Bar, Restaurant\" user5.about = ('Award winnning", "print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage')", "def seed_db(): \"\"\" Seeds the database with some initial data", "Century\" user1.email = \"<EMAIL>\" user1.city_country = \"Singapore, SG\" user1.tags =", "coverage import os from flask.cli import FlaskGroup from project import", "user1.username = \"Meeting Room Of The Century\" user1.email = \"<EMAIL>\"", "are serving looks tonight''' user2.buyer_detail = '''We are not buying'''", "looks tonight''' user2.buyer_detail = '''We are not buying''' user3 =", "user6.email = \"<EMAIL>\" user6.city_country = \"Singapore, SG\" user6.tags = \"Performer\"", "looks with us''' user2.seller_detail = '''We are serving looks tonight'''", "= ('Award winnning winebar with the best selection of alcohol.'", "= '''We are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username", "the best meeting space you will ever see''' user1.seller_detail =", "user5.username = \"Epic Winebar\" user5.email = \"<EMAIL>\" user5.city_country = \"Singapore,", "\"Singapore, SG\" user4.tags = \"Photography\" user4.about = ('We are a", "result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return -1", "= '''We sell wine''' user5.buyer_detail = '''We are not buying'''", "are people who like to dance alot.' 
'Give us music", "\"Singapore, SG\" user6.tags = \"Performer\" user6.about = ('Dancers who dance", "code coverage \"\"\" tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result =", "manage.py for flask application \"\"\" import unittest import coverage import", "from project import create_app, db from project.api.models import User #", "= \"Meeting Spaces\" user1.about = '''This is the best meeting", "heart attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail = '''We", "eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till You Breakup\" user2.email = \"<EMAIL>\"", "will capture your ' 'memories in ways you cannot imagine.')", "with the best selection of alcohol.' 'We serve delicious international", "initial data \"\"\" user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting", "always ready for' 'your craziest events.') user5.seller_detail = '''We sell", "'project/config.py', ] ) COV.start() app = create_app() cli = FlaskGroup(create_app=create_app)", "photos. ' 'We will capture your ' 'memories in ways", "'''We sell food''' user3.buyer_detail = '''We are not buying''' user4", "= \"Makeup Till You Breakup\" user2.email = \"<EMAIL>\" user2.city_country =", "dance are people who like to dance alot.' 
'Give us", "\"Meeting Spaces\" user1.about = '''This is the best meeting space", "a new db \"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command() def test():", "Buffet\" user3.email = \"<EMAIL>\" user3.city_country = \"Singapore, SG\" user3.tags =", "\"\"\" tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if", "is the best meeting space you will ever see''' user1.seller_detail", "user2.tags = \"Stylist\" user2.about = '''Reimagine your looks with us'''", "buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\" user4.email", "project import create_app, db from project.api.models import User # Code", "= \"Singapore, SG\" user4.tags = \"Photography\" user4.about = ('We are", "dance''' user6.buyer_detail = '''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3)", "all db and recreates a new db \"\"\" db.drop_all() db.create_all()", "User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\" user5.email = \"<EMAIL>\" user5.city_country", "app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\"", "user6.seller_detail = '''We sell dance''' user6.buyer_detail = '''We are not", "= \"<EMAIL>\" user1.city_country = \"Singapore, SG\" user1.tags = \"Meeting Spaces\"", "unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0", "group of photographers specialized in wedding' 'photography. 
' 'We have", "and we will dance for you.') user6.seller_detail = '''We sell", "\"Bar, Restaurant\" user5.about = ('Award winnning winebar with the best", "'''We are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username =", "= User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till You Breakup\" user2.email", "= unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir", "create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\" Runs the", "new db \"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): \"\"\"", "test(): \"\"\" Runs test without code coverage \"\"\" tests =", "will dance for you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail", "= '''We are serving looks tonight''' user2.buyer_detail = '''We are", "Winebar\" user5.email = \"<EMAIL>\" user5.city_country = \"Singapore, SG\" user5.tags =", "= \"Heart Attack Buffet\" user3.email = \"<EMAIL>\" user3.city_country = \"Singapore,", "User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room Of The Century\" user1.email", "not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if", "os from flask.cli import FlaskGroup from project import create_app, db", "= '''Eat till you get a heart attack''' user3.seller_detail =", "not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till", "'''We sell photos''' user4.buyer_detail = '''We are not buying''' user5", "= ('Dancers who dance are people who like to dance", "\"Singapore, SG\" user3.tags = \"Buffet\" user3.about = '''Eat till you", "\"<EMAIL>\" user2.city_country = \"Singapore, SG\" 
user2.tags = \"Stylist\" user2.about =", "not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\"", "you will ever see''' user1.seller_detail = '''We sell space''' user1.buyer_detail", "who like to dance alot.' 'Give us music and we", "user3.buyer_detail = '''We are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower())", "We are always ready for' 'your craziest events.') user5.seller_detail =", "= \"Meeting Room Of The Century\" user1.email = \"<EMAIL>\" user1.city_country", "FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\" Runs the unit tests with", "user1.buyer_detail = '''We are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower())", "import User # Code coverage COV = coverage.Coverage( branch=True, include='project/*',", "user1.city_country = \"Singapore, SG\" user1.tags = \"Meeting Spaces\" user1.about =", "db.session.commit() @cli.command() def test(): \"\"\" Runs test without code coverage", "user2.about = '''Reimagine your looks with us''' user2.seller_detail = '''We", "specialized in wedding' 'photography. 
' 'We have won numerous awards", "are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers", "'''Eat till you get a heart attack''' user3.seller_detail = '''We", "you cannot imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail =", "user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who Dance\" user6.email", "sell wine''' user5.buyer_detail = '''We are not buying''' user6 =", "db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__ ==", "= '''This is the best meeting space you will ever", "COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] )", "= \"<EMAIL>\" user5.city_country = \"Singapore, SG\" user5.tags = \"Bar, Restaurant\"", "not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who", "user5.about = ('Award winnning winebar with the best selection of", "user2.email = \"<EMAIL>\" user2.city_country = \"Singapore, SG\" user2.tags = \"Stylist\"", "are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic", "'''We are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username =", "\"Meeting Room Of The Century\" user1.email = \"<EMAIL>\" user1.city_country =", "= User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\" user4.email = \"<EMAIL>\"", "seed_db(): \"\"\" Seeds the database with some initial data \"\"\"", "user5.tags = \"Bar, Restaurant\" user5.about = ('Award winnning winebar with", "from our travels. 
We are always ready for' 'your craziest", "= User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who Dance\" user6.email =", "eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\" user5.email = \"<EMAIL>\" user5.city_country =", "cannot imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail = '''We", "-1 @cli.command() def recreate_db(): \"\"\" Destroys all db and recreates", "\"Singapore, SG\" user1.tags = \"Meeting Spaces\" user1.about = '''This is", "file://%s/index.html' % covdir) COV.erase() return 0 return -1 @cli.command() def", "import coverage import os from flask.cli import FlaskGroup from project", "User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who Dance\" user6.email = \"<EMAIL>\"", "FlaskGroup from project import create_app, db from project.api.models import User", "Dance\" user6.email = \"<EMAIL>\" user6.city_country = \"Singapore, SG\" user6.tags =", "= FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\" Runs the unit tests", "@cli.command() def cov(): \"\"\" Runs the unit tests with coverage", "db and recreates a new db \"\"\" db.drop_all() db.create_all() db.session.commit()", "serving looks tonight''' user2.buyer_detail = '''We are not buying''' user3", "craziest events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail = '''We", "'We have won numerous awards for our photos. ' 'We", "user6.tags = \"Performer\" user6.about = ('Dancers who dance are people", "awards for our photos. ' 'We will capture your '", "user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room Of The", "user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\" user5.email =", "our photos. 
' 'We will capture your ' 'memories in", "import os from flask.cli import FlaskGroup from project import create_app,", "create_app, db from project.api.models import User # Code coverage COV", "the unit tests with coverage \"\"\" tests = unittest.TestLoader().discover('project/tests') result", "SG\" user5.tags = \"Bar, Restaurant\" user5.about = ('Award winnning winebar", "Of The Century\" user1.email = \"<EMAIL>\" user1.city_country = \"Singapore, SG\"", "winnning winebar with the best selection of alcohol.' 'We serve", "user2.buyer_detail = '''We are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower())", "user4.about = ('We are a group of photographers specialized in", "\"\"\" Runs the unit tests with coverage \"\"\" tests =", "Runs the unit tests with coverage \"\"\" tests = unittest.TestLoader().discover('project/tests')", "unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir =", "@cli.command() def recreate_db(): \"\"\" Destroys all db and recreates a", "omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app() cli", "\"Heart Attack Buffet\" user3.email = \"<EMAIL>\" user3.city_country = \"Singapore, SG\"", "'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else:", "= \"Singapore, SG\" user1.tags = \"Meeting Spaces\" user1.about = '''This", "return 0 return -1 @cli.command() def recreate_db(): \"\"\" Destroys all", "wedding' 'photography. ' 'We have won numerous awards for our", "db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): \"\"\" Runs test without", "= ('We are a group of photographers specialized in wedding'", "won numerous awards for our photos. 
' 'We will capture", "= '''We are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username", "without code coverage \"\"\" tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result", "Restaurant\" user5.about = ('Award winnning winebar with the best selection", "meeting space you will ever see''' user1.seller_detail = '''We sell", "us music and we will dance for you.') user6.seller_detail =", "\"\"\" manage.py for flask application \"\"\" import unittest import coverage", "unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:')", "user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till You Breakup\"", "'''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6)", "dance alot.' 'Give us music and we will dance for", "\"\"\" Destroys all db and recreates a new db \"\"\"", "ready for' 'your craziest events.') user5.seller_detail = '''We sell wine'''", "of photographers specialized in wedding' 'photography. ' 'We have won", "cov(): \"\"\" Runs the unit tests with coverage \"\"\" tests", "events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail = '''We are", "sell dance''' user6.buyer_detail = '''We are not buying''' db.session.add(user1) db.session.add(user2)", "for you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail = '''We", "the best selection of alcohol.' 
'We serve delicious international cuisine,", "Attack Buffet\" user3.email = \"<EMAIL>\" user3.city_country = \"Singapore, SG\" user3.tags", "= \"Singapore, SG\" user3.tags = \"Buffet\" user3.about = '''Eat till", "import unittest import coverage import os from flask.cli import FlaskGroup", "coverage \"\"\" tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests)", "coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app", "= \"Singapore, SG\" user2.tags = \"Stylist\" user2.about = '''Reimagine your", "you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail = '''We are", "tonight''' user2.buyer_detail = '''We are not buying''' user3 = User(", "'photography. ' 'We have won numerous awards for our photos.", "who dance are people who like to dance alot.' 'Give", "\"Dancers Who Dance\" user6.email = \"<EMAIL>\" user6.city_country = \"Singapore, SG\"", "Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir)", "'''We are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username =", "db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__ == '__main__':", "'''We sell wine''' user5.buyer_detail = '''We are not buying''' user6", "User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\" user4.email = \"<EMAIL>\" user4.city_country", "alcohol.' 'We serve delicious international cuisine, with fusion' 'dishes inspired", "selection of alcohol.' 
'We serve delicious international cuisine, with fusion'", "user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\" user4.email =", "cuisine, with fusion' 'dishes inspired from our travels. We are", "import create_app, db from project.api.models import User # Code coverage", "\"\"\" Runs test without code coverage \"\"\" tests = unittest.TestLoader().discover(", "eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who Dance\" user6.email = \"<EMAIL>\" user6.city_country", "user3.seller_detail = '''We sell food''' user3.buyer_detail = '''We are not", "= \"Singapore, SG\" user5.tags = \"Bar, Restaurant\" user5.about = ('Award", "0 return -1 @cli.command() def recreate_db(): \"\"\" Destroys all db", "user6.username = \"Dancers Who Dance\" user6.email = \"<EMAIL>\" user6.city_country =", "sell space''' user1.buyer_detail = '''We are not buying''' user2 =", "if result.wasSuccessful(): return 0 else: return -1 @cli.command() def seed_db():", "= '''We are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username", "unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return -1 @cli.command() def", "result.wasSuccessful(): return 0 else: return -1 @cli.command() def seed_db(): \"\"\"", "\"\"\" tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop()", "buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack Buffet\"", "import FlaskGroup from project import create_app, db from project.api.models import", "\"<EMAIL>\" user5.city_country = \"Singapore, SG\" user5.tags = \"Bar, Restaurant\" user5.about", "= \"<EMAIL>\" user6.city_country = \"Singapore, SG\" user6.tags = \"Performer\" user6.about", 
"os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' %", "in ways you cannot imagine.') user4.seller_detail = '''We sell photos'''", "eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack Buffet\" user3.email = \"<EMAIL>\" user3.city_country", "version: file://%s/index.html' % covdir) COV.erase() return 0 return -1 @cli.command()", "user3.username = \"Heart Attack Buffet\" user3.email = \"<EMAIL>\" user3.city_country =", "'your craziest events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail =", "user2.username = \"Makeup Till You Breakup\" user2.email = \"<EMAIL>\" user2.city_country", "= \"Singapore, SG\" user6.tags = \"Performer\" user6.about = ('Dancers who", "tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful():", "\"<EMAIL>\" user1.city_country = \"Singapore, SG\" user1.tags = \"Meeting Spaces\" user1.about", "eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant Photography\" user4.email = \"<EMAIL>\" user4.city_country =", "buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username = \"Dancers Who Dance\"", "@cli.command() def seed_db(): \"\"\" Seeds the database with some initial", "user5.email = \"<EMAIL>\" user5.city_country = \"Singapore, SG\" user5.tags = \"Bar,", "best selection of alcohol.' 
'We serve delicious international cuisine, with", "unittest import coverage import os from flask.cli import FlaskGroup from", "] ) COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command()", "= unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return -1 @cli.command()", "will ever see''' user1.seller_detail = '''We sell space''' user1.buyer_detail =", "to dance alot.' 'Give us music and we will dance", "= \"Epic Winebar\" user5.email = \"<EMAIL>\" user5.city_country = \"Singapore, SG\"", "0 else: return -1 @cli.command() def seed_db(): \"\"\" Seeds the", "= '''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5)", "winebar with the best selection of alcohol.' 'We serve delicious", "not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack", "for our photos. ' 'We will capture your ' 'memories", "photos''' user4.buyer_detail = '''We are not buying''' user5 = User(", "SG\" user2.tags = \"Stylist\" user2.about = '''Reimagine your looks with", "db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__ == '__main__': cli()", "COV.erase() return 0 return -1 @cli.command() def recreate_db(): \"\"\" Destroys", "SG\" user3.tags = \"Buffet\" user3.about = '''Eat till you get", "= '''We are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username", "Spaces\" user1.about = '''This is the best meeting space you", "fusion' 'dishes inspired from our travels. 
We are always ready", "'''This is the best meeting space you will ever see'''", "('We are a group of photographers specialized in wedding' 'photography.", "'''We sell space''' user1.buyer_detail = '''We are not buying''' user2", "your looks with us''' user2.seller_detail = '''We are serving looks", "space you will ever see''' user1.seller_detail = '''We sell space'''", "space''' user1.buyer_detail = '''We are not buying''' user2 = User(", "= '''Reimagine your looks with us''' user2.seller_detail = '''We are", "\"<EMAIL>\" user4.city_country = \"Singapore, SG\" user4.tags = \"Photography\" user4.about =", "\"\"\" user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room Of", "food''' user3.buyer_detail = '''We are not buying''' user4 = User(", "coverage \"\"\" tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful():", "user5.city_country = \"Singapore, SG\" user5.tags = \"Bar, Restaurant\" user5.about =", "\"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): \"\"\" Runs test", "= \"<EMAIL>\" user4.city_country = \"Singapore, SG\" user4.tags = \"Photography\" user4.about", ") COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def", "Destroys all db and recreates a new db \"\"\" db.drop_all()", "else: return -1 @cli.command() def seed_db(): \"\"\" Seeds the database", "recreate_db(): \"\"\" Destroys all db and recreates a new db", "music and we will dance for you.') user6.seller_detail = '''We", "us''' user2.seller_detail = '''We are serving looks tonight''' user2.buyer_detail =", "= '''We sell photos''' user4.buyer_detail = '''We are not buying'''", "wine''' user5.buyer_detail = '''We are not buying''' user6 = User(", "'We will capture your ' 'memories in ways you cannot", "You Breakup\" user2.email = \"<EMAIL>\" user2.city_country = \"Singapore, SG\" user2.tags", 
"'''We are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username =", "basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version:", "Runs test without code coverage \"\"\" tests = unittest.TestLoader().discover( 'project/tests',", "= unittest.TestLoader().discover( 'project/tests', pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return", "inspired from our travels. We are always ready for' 'your", "= \"Performer\" user6.about = ('Dancers who dance are people who", "= \"Dancers Who Dance\" user6.email = \"<EMAIL>\" user6.city_country = \"Singapore,", "= '''We sell food''' user3.buyer_detail = '''We are not buying'''", "\"<EMAIL>\" user3.city_country = \"Singapore, SG\" user3.tags = \"Buffet\" user3.about =", "def test(): \"\"\" Runs test without code coverage \"\"\" tests", "of alcohol.' 'We serve delicious international cuisine, with fusion' 'dishes", "user4.tags = \"Photography\" user4.about = ('We are a group of", "\"\"\" import unittest import coverage import os from flask.cli import", "buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit() if __name__", "= User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\" user5.email = \"<EMAIL>\"", "('Award winnning winebar with the best selection of alcohol.' 
'We", "user4.buyer_detail = '''We are not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower())", "@cli.command() def test(): \"\"\" Runs test without code coverage \"\"\"", "% covdir) COV.erase() return 0 return -1 @cli.command() def recreate_db():", "user5.seller_detail = '''We sell wine''' user5.buyer_detail = '''We are not", "= User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack Buffet\" user3.email =", "with fusion' 'dishes inspired from our travels. We are always", "pattern='test*.py') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): return 0 else: return", "flask application \"\"\" import unittest import coverage import os from", "recreates a new db \"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command() def", "with coverage \"\"\" tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if", "'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app() cli =", "are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4) db.session.add(user5) db.session.add(user6) db.session.commit()", "The Century\" user1.email = \"<EMAIL>\" user1.city_country = \"Singapore, SG\" user1.tags", "with us''' user2.seller_detail = '''We are serving looks tonight''' user2.buyer_detail", "'''We are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower()) user6.username =", "user4.seller_detail = '''We sell photos''' user4.buyer_detail = '''We are not", "User # Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[", "SG\" user1.tags = \"Meeting Spaces\" user1.about = '''This is the", "user2.city_country = \"Singapore, SG\" user2.tags = \"Stylist\" user2.about = '''Reimagine", "\"Epic Winebar\" user5.email = \"<EMAIL>\" user5.city_country = \"Singapore, SG\" user5.tags", 
"COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase() return 0 return", "if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__))", "are not buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup", "user4.city_country = \"Singapore, SG\" user4.tags = \"Photography\" user4.about = ('We", "COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir =", "sell food''' user3.buyer_detail = '''We are not buying''' user4 =", "result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report()", "# Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*',", "Photography\" user4.email = \"<EMAIL>\" user4.city_country = \"Singapore, SG\" user4.tags =", "and recreates a new db \"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command()", "with some initial data \"\"\" user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username", "user6.city_country = \"Singapore, SG\" user6.tags = \"Performer\" user6.about = ('Dancers", "('Dancers who dance are people who like to dance alot.'", "cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\" Runs the unit", "'''We sell dance''' user6.buyer_detail = '''We are not buying''' db.session.add(user1)", "def recreate_db(): \"\"\" Destroys all db and recreates a new", "'''Reimagine your looks with us''' user2.seller_detail = '''We are serving", "= unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save() print('Coverage", "database with some initial data \"\"\" user1 = User( 
eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower())", "sell photos''' user4.buyer_detail = '''We are not buying''' user5 =", "Room Of The Century\" user1.email = \"<EMAIL>\" user1.city_country = \"Singapore,", "a heart attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail =", "for' 'your craziest events.') user5.seller_detail = '''We sell wine''' user5.buyer_detail", "print('HTML version: file://%s/index.html' % covdir) COV.erase() return 0 return -1", "covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir)", "db \"\"\" db.drop_all() db.create_all() db.session.commit() @cli.command() def test(): \"\"\" Runs", "= coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start()", "Breakup\" user2.email = \"<EMAIL>\" user2.city_country = \"Singapore, SG\" user2.tags =", "our travels. We are always ready for' 'your craziest events.')", "covdir) COV.erase() return 0 return -1 @cli.command() def recreate_db(): \"\"\"", "user3.tags = \"Buffet\" user3.about = '''Eat till you get a", "have won numerous awards for our photos. ' 'We will", "test without code coverage \"\"\" tests = unittest.TestLoader().discover( 'project/tests', pattern='test*.py')", "def cov(): \"\"\" Runs the unit tests with coverage \"\"\"", "user1.seller_detail = '''We sell space''' user1.buyer_detail = '''We are not", "in wedding' 'photography. 
' 'We have won numerous awards for", "\"Pleasant Photography\" user4.email = \"<EMAIL>\" user4.city_country = \"Singapore, SG\" user4.tags", "= os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html'", "= \"Stylist\" user2.about = '''Reimagine your looks with us''' user2.seller_detail", "Code coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py',", "user3.city_country = \"Singapore, SG\" user3.tags = \"Buffet\" user3.about = '''Eat", "= \"Pleasant Photography\" user4.email = \"<EMAIL>\" user4.city_country = \"Singapore, SG\"", "= \"Buffet\" user3.about = '''Eat till you get a heart", "get a heart attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail", "delicious international cuisine, with fusion' 'dishes inspired from our travels.", "'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase() return 0", "= '''We sell dance''' user6.buyer_detail = '''We are not buying'''", "are not buying''' user4 = User( eth_address='0x6ea57F562Ef39f1776eb66D91c54A961Fa6DdadA'.lower()) user4.username = \"Pleasant", "coverage COV = coverage.Coverage( branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ]", "numerous awards for our photos. 
' 'We will capture your", "user4.username = \"Pleasant Photography\" user4.email = \"<EMAIL>\" user4.city_country = \"Singapore,", "imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail = '''We are", "= User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room Of The Century\"", "= \"<EMAIL>\" user3.city_country = \"Singapore, SG\" user3.tags = \"Buffet\" user3.about", "db from project.api.models import User # Code coverage COV =", "project.api.models import User # Code coverage COV = coverage.Coverage( branch=True,", "from project.api.models import User # Code coverage COV = coverage.Coverage(", "user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack Buffet\" user3.email", "user1.tags = \"Meeting Spaces\" user1.about = '''This is the best", "some initial data \"\"\" user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username =", "tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests) if result.wasSuccessful(): COV.stop() COV.save()", "see''' user1.seller_detail = '''We sell space''' user1.buyer_detail = '''We are", "a group of photographers specialized in wedding' 'photography. ' 'We", "result.wasSuccessful(): COV.stop() COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir", "ways you cannot imagine.') user4.seller_detail = '''We sell photos''' user4.buyer_detail", "like to dance alot.' 'Give us music and we will", "' 'We will capture your ' 'memories in ways you", "= '''We sell space''' user1.buyer_detail = '''We are not buying'''", "' 'We have won numerous awards for our photos. 
'", "buying''' user2 = User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till You", "serve delicious international cuisine, with fusion' 'dishes inspired from our", "tests with coverage \"\"\" tests = unittest.TestLoader().discover('project/tests') result = unittest.TextTestRunner(verbosity=2).run(tests)", "User( eth_address='0xF4675187bD8B058CcF87f7116b54970fC3f81b52'.lower()) user2.username = \"Makeup Till You Breakup\" user2.email =", "COV.save() print('Coverage Summary:') COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir,", "SG\" user6.tags = \"Performer\" user6.about = ('Dancers who dance are", "capture your ' 'memories in ways you cannot imagine.') user4.seller_detail", "db.create_all() db.session.commit() @cli.command() def test(): \"\"\" Runs test without code", "' 'memories in ways you cannot imagine.') user4.seller_detail = '''We", "SG\" user4.tags = \"Photography\" user4.about = ('We are a group", "international cuisine, with fusion' 'dishes inspired from our travels. 
We", "application \"\"\" import unittest import coverage import os from flask.cli", "= create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov(): \"\"\" Runs", "ever see''' user1.seller_detail = '''We sell space''' user1.buyer_detail = '''We", "user2.seller_detail = '''We are serving looks tonight''' user2.buyer_detail = '''We", "till you get a heart attack''' user3.seller_detail = '''We sell", "return 0 else: return -1 @cli.command() def seed_db(): \"\"\" Seeds", "'''We are serving looks tonight''' user2.buyer_detail = '''We are not", "return -1 @cli.command() def seed_db(): \"\"\" Seeds the database with", "\"Photography\" user4.about = ('We are a group of photographers specialized", "'Give us music and we will dance for you.') user6.seller_detail", "return -1 @cli.command() def recreate_db(): \"\"\" Destroys all db and", "user5.buyer_detail = '''We are not buying''' user6 = User( eth_address='0x50E9002d238d9a2A29C3047971E8006663A9d799'.lower())", "\"\"\" Seeds the database with some initial data \"\"\" user1", "COV.start() app = create_app() cli = FlaskGroup(create_app=create_app) @cli.command() def cov():", "os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase() return", "\"Makeup Till You Breakup\" user2.email = \"<EMAIL>\" user2.city_country = \"Singapore,", "Till You Breakup\" user2.email = \"<EMAIL>\" user2.city_country = \"Singapore, SG\"", "'dishes inspired from our travels. 
We are always ready for'", "= \"Bar, Restaurant\" user5.about = ('Award winnning winebar with the", "\"Singapore, SG\" user2.tags = \"Stylist\" user2.about = '''Reimagine your looks", "flask.cli import FlaskGroup from project import create_app, db from project.api.models", "-1 @cli.command() def seed_db(): \"\"\" Seeds the database with some", "user3.about = '''Eat till you get a heart attack''' user3.seller_detail", "we will dance for you.') user6.seller_detail = '''We sell dance'''", "alot.' 'Give us music and we will dance for you.')", "\"<EMAIL>\" user6.city_country = \"Singapore, SG\" user6.tags = \"Performer\" user6.about =", "user6.buyer_detail = '''We are not buying''' db.session.add(user1) db.session.add(user2) db.session.add(user3) db.session.add(user4)", "include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app = create_app()", "user1.email = \"<EMAIL>\" user1.city_country = \"Singapore, SG\" user1.tags = \"Meeting", "photographers specialized in wedding' 'photography. ' 'We have won numerous", "travels. 
We are always ready for' 'your craziest events.') user5.seller_detail", "are always ready for' 'your craziest events.') user5.seller_detail = '''We", "you get a heart attack''' user3.seller_detail = '''We sell food'''", "not buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\"", "dance for you.') user6.seller_detail = '''We sell dance''' user6.buyer_detail =", "from flask.cli import FlaskGroup from project import create_app, db from", "are not buying''' user3 = User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart", "user6.about = ('Dancers who dance are people who like to", "best meeting space you will ever see''' user1.seller_detail = '''We", "user3.email = \"<EMAIL>\" user3.city_country = \"Singapore, SG\" user3.tags = \"Buffet\"", "buying''' user5 = User( eth_address='0x04Ee2da68b909684d586a852970E424981f30928'.lower()) user5.username = \"Epic Winebar\" user5.email", "'We serve delicious international cuisine, with fusion' 'dishes inspired from", "are a group of photographers specialized in wedding' 'photography. 
'", "Seeds the database with some initial data \"\"\" user1 =", "\"Buffet\" user3.about = '''Eat till you get a heart attack'''", "Who Dance\" user6.email = \"<EMAIL>\" user6.city_country = \"Singapore, SG\" user6.tags", "'memories in ways you cannot imagine.') user4.seller_detail = '''We sell", "= os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML version: file://%s/index.html' % covdir) COV.erase()", "for flask application \"\"\" import unittest import coverage import os", "branch=True, include='project/*', omit=[ 'project/tests/*', 'project/config.py', ] ) COV.start() app =", "eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room Of The Century\" user1.email =", "user1.about = '''This is the best meeting space you will", "the database with some initial data \"\"\" user1 = User(", "= \"<EMAIL>\" user2.city_country = \"Singapore, SG\" user2.tags = \"Stylist\" user2.about", "= \"Photography\" user4.about = ('We are a group of photographers", "data \"\"\" user1 = User( eth_address='0x0d604C28A2a7c199c7705859c3f88A71cCE2aCb7'.lower()) user1.username = \"Meeting Room", "attack''' user3.seller_detail = '''We sell food''' user3.buyer_detail = '''We are", "\"Performer\" user6.about = ('Dancers who dance are people who like", "people who like to dance alot.' 'Give us music and", "COV.report() basedir = os.path.abspath(os.path.dirname(__file__)) covdir = os.path.join(basedir, 'tmp/coverage') COV.html_report(directory=covdir) print('HTML", "unit tests with coverage \"\"\" tests = unittest.TestLoader().discover('project/tests') result =", "User( eth_address='0x4FaE992a476bB00Be85B7BF76fef8e27DE2231C7'.lower()) user3.username = \"Heart Attack Buffet\" user3.email = \"<EMAIL>\"" ]
[ "= logs.get(self.monitor) if current == self.best: should_save = True else:", "+= 1 self.update_artifacts() should_save = False if self.epochs_since_last_save == 0:", "should_save = False if self.epochs_since_last_save == 0: if self.save_best_only: current", "= False if self.epochs_since_last_save == 0: if self.save_best_only: current =", "self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self): return self.__class__.__name__ def get_artifacts(self): return", "from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path,", "should_save = True else: should_save = True if should_save: save_to_pickle(self._artifacts,", "\".pkl\")) def update_artifacts(self): for callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts()", "get_name(self): return self.__class__.__name__ def get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch}", "keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC):", "update_artifacts(self): for callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] =", "self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best = artifacts[\"best_score\"] self._completed_epoch = artifacts[\"completed_epoch\"]", "if self.epochs_since_last_save == 0: if self.save_best_only: current = logs.get(self.monitor) if", "CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"),", "if should_save: save_to_pickle(self._artifacts, 
os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) + \".pkl\")) def", "def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts()", "from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint,", "**kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path self._artifacts = artifacts", "self._workspace_path = workspace_path self._artifacts = artifacts self._completed_epoch = 0 self._callbacks", "self.save_best_only: current = logs.get(self.monitor) if current == self.best: should_save =", "from keras.callbacks import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils", "__init__(self, workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path =", "True else: should_save = True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\"", "+ str(epoch+1) + \".pkl\")) def update_artifacts(self): for callback in self._callbacks:", "CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs)", "self._artifacts = artifacts self._completed_epoch = 0 self._callbacks = callbacks def", "import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle", "self._callbacks = callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch", "str(epoch+1) + \".pkl\")) def update_artifacts(self): for callback in 
self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()]", "if current == self.best: should_save = True else: should_save =", "for callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts()", "False if self.epochs_since_last_save == 0: if self.save_best_only: current = logs.get(self.monitor)", "get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best", "CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self,", "os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) + \".pkl\")) def update_artifacts(self): for callback", "self.__class__.__name__ def get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self,", "self._completed_epoch += 1 self.update_artifacts() should_save = False if self.epochs_since_last_save ==", "**kwargs) self._workspace_path = workspace_path self._artifacts = artifacts self._completed_epoch = 0", "\"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path self._artifacts = artifacts self._completed_epoch =", "workspace_path self._artifacts = artifacts self._completed_epoch = 0 self._callbacks = callbacks", "import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def", "\"artifacts-\" + str(epoch+1) + \".pkl\")) def update_artifacts(self): for callback in", "should_save = True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1)", "class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks, **kwargs): 
super().__init__(os.path.join(workspace_path,", "import os from keras.callbacks import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC", "<gh_stars>1-10 import os from keras.callbacks import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import", "if self.save_best_only: current = logs.get(self.monitor) if current == self.best: should_save", "return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best =", "True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) + \".pkl\"))", "{\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best = artifacts[\"best_score\"]", "artifacts self._completed_epoch = 0 self._callbacks = callbacks def on_epoch_end(self, epoch,", "0: if self.save_best_only: current = logs.get(self.monitor) if current == self.best:", "current == self.best: should_save = True else: should_save = True", "workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path", "super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path self._artifacts = artifacts self._completed_epoch", "def get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts):", "self.get_artifacts() def get_name(self): return self.__class__.__name__ def get_artifacts(self): return {\"best_score\": self.best,", "== 0: if self.save_best_only: current = logs.get(self.monitor) if current ==", "self.update_artifacts() should_save = False if self.epochs_since_last_save == 0: if self.save_best_only:", "keras.callbacks import ModelCheckpoint from 
keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import", "save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks, **kwargs):", "current = logs.get(self.monitor) if current == self.best: should_save = True", "self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self): return", "+ \".pkl\")) def update_artifacts(self): for callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] =", "super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts() should_save = False if", "self.best, \"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best = artifacts[\"best_score\"] self._completed_epoch", "def update_artifacts(self): for callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()]", "0 self._callbacks = callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs)", "os from keras.callbacks import ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from", "= callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self): return self.__class__.__name__ def", "= True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) +", "def __init__(self, workspace_path, artifacts, callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path", "= workspace_path self._artifacts = artifacts self._completed_epoch = 0 self._callbacks =", "artifacts, callbacks, **kwargs): 
super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path self._artifacts", "logs.get(self.monitor) if current == self.best: should_save = True else: should_save", "= callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch +=", "epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts() should_save =", "logs) self._completed_epoch += 1 self.update_artifacts() should_save = False if self.epochs_since_last_save", "= self.get_artifacts() def get_name(self): return self.__class__.__name__ def get_artifacts(self): return {\"best_score\":", "= 0 self._callbacks = callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch,", "= True else: should_save = True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path,", "self.best: should_save = True else: should_save = True if should_save:", "import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts, callbacks,", "\"completed_epoch\": self._completed_epoch} def prepare_from_artifacts(self, artifacts): self.best = artifacts[\"best_score\"] self._completed_epoch =", "== self.best: should_save = True else: should_save = True if", "callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self): return self.__class__.__name__ def get_artifacts(self):", "def get_name(self): return self.__class__.__name__ def get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\":", "return self.__class__.__name__ def get_artifacts(self): return {\"best_score\": self.best, \"completed_epoch\": self._completed_epoch} def", "self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self): return 
self.__class__.__name__", "callbacks def on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1", "ModelCheckpoint from keras_transformer.training.custom_callbacks.CustomCallbackABC import CustomCallbackABC from keras_transformer.utils.io_utils import save_to_pickle class", "in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def get_name(self):", "= artifacts self._completed_epoch = 0 self._callbacks = callbacks def on_epoch_end(self,", "self.epochs_since_last_save == 0: if self.save_best_only: current = logs.get(self.monitor) if current", "logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts() should_save = False", "callbacks, **kwargs): super().__init__(os.path.join(workspace_path, \"model-{epoch:01d}.h5\"), **kwargs) self._workspace_path = workspace_path self._artifacts =", "keras_transformer.utils.io_utils import save_to_pickle class CustomCheckpointer(ModelCheckpoint, CustomCallbackABC): def __init__(self, workspace_path, artifacts,", "self._completed_epoch = 0 self._callbacks = callbacks def on_epoch_end(self, epoch, logs=None):", "else: should_save = True if should_save: save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\" +", "save_to_pickle(self._artifacts, os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) + \".pkl\")) def update_artifacts(self): for", "1 self.update_artifacts() should_save = False if self.epochs_since_last_save == 0: if", "on_epoch_end(self, epoch, logs=None): super().on_epoch_end(epoch, logs) self._completed_epoch += 1 self.update_artifacts() should_save", "callback in self._callbacks: self._artifacts[\"callbacks\"][callback.get_name()] = callback.get_artifacts() self._artifacts[\"callbacks\"][self.get_name()] = self.get_artifacts() def", "should_save: save_to_pickle(self._artifacts, 
os.path.join(self._workspace_path, \"artifacts-\" + str(epoch+1) + \".pkl\")) def update_artifacts(self):" ]
[ "= os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir,", "splits = {'train','test','valid'} files = os.listdir(in_img_dir) count = len(files) for", "'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = os.path.join(input_dir, 'Images') in_label_dir =", "count = len(files) for f in files: print(f) src =", "import os import shutil input_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5'", "import shutil input_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir =", "os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir, 'images')", "= os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir, 'labels') splits = {'train','test','valid'}", "os.path.join(output_dir, 'labels') splits = {'train','test','valid'} files = os.listdir(in_img_dir) count =", "for f in files: print(f) src = os.path.join(input_dir,f) shutil.copyfile(src, dst)", "out_label_dir = os.path.join(output_dir, 'labels') splits = {'train','test','valid'} files = os.listdir(in_img_dir)", "os import shutil input_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir", "files = os.listdir(in_img_dir) count = len(files) for f in files:", "len(files) for f in files: print(f) src = os.path.join(input_dir,f) shutil.copyfile(src,", "shutil input_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = os.path.join(input_dir,", "= 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = 
os.path.join(input_dir, 'Images') in_label_dir", "out_img_dir = os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir, 'labels') splits =", "= os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir,", "'Images') in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir, 'images') out_label_dir", "= len(files) for f in files: print(f) src = os.path.join(input_dir,f)", "os.listdir(in_img_dir) count = len(files) for f in files: print(f) src", "'Labels') out_img_dir = os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir, 'labels') splits", "{'train','test','valid'} files = os.listdir(in_img_dir) count = len(files) for f in", "= os.path.join(output_dir, 'labels') splits = {'train','test','valid'} files = os.listdir(in_img_dir) count", "in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir, 'images') out_label_dir =", "'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir", "'labels') splits = {'train','test','valid'} files = os.listdir(in_img_dir) count = len(files)", "os.path.join(input_dir, 'Labels') out_img_dir = os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir, 'labels')", "in_img_dir = os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir, 'Labels') out_img_dir =", "os.path.join(output_dir, 'images') out_label_dir = os.path.join(output_dir, 'labels') splits = {'train','test','valid'} files", "'images') out_label_dir = os.path.join(output_dir, 'labels') splits = {'train','test','valid'} files =", "= os.listdir(in_img_dir) count = len(files) for f in files: print(f)", "= {'train','test','valid'} files = os.listdir(in_img_dir) count = len(files) for f", "= 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = 
os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir, 'Labels')", "input_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5' output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = os.path.join(input_dir, 'Images')", "output_dir = 'E:\\Dataset\\zhitang\\Dataset_Zhitang_Yolo5\\ZhitangYolo5' in_img_dir = os.path.join(input_dir, 'Images') in_label_dir = os.path.join(input_dir," ]
[ "DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST):", "self._encoding = encoding self._muted = False self._pwstate = STATE_OFF self._current_source", "voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF,", "the device.\"\"\" return self._pwstate @property def is_volume_muted(self): \"\"\"Return boolean indicating", "CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS =", "\"\"\"Get the latest state from the device.\"\"\" with self.projector() as", "source(self): \"\"\"Return current input source.\"\"\" return self._current_source @property def source_list(self):", "= None with self.projector() as projector: if not self._name: self._name", "self._current_source = None with self.projector() as projector: if not self._name:", "self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink Projector instance.\"\"\" from", "self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute (true) of", "the PJLink platform.\"\"\" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name", "device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device", "self._encoding) projector.authenticate(self._password) return projector def update(self): \"\"\"Get the latest state", "name self._password = password self._encoding = encoding self._muted = False", "Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def update(self): \"\"\"Get", "= ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT =", "import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): \"\"\"Set the input", "the name of the device.\"\"\" return self._name @property 
def state(self):", "name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if", "= \\ {format_input_source(*x): x for x in inputs} self._source_list =", "encoding, password): \"\"\"Iinitialize the PJLink device.\"\"\" self._host = host self._port", "SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE", "projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return the", "of unmute (false) media player.\"\"\" with self.projector() as projector: from", "projector(self): \"\"\"Create PJLink Projector instance.\"\"\" from pypjlink import Projector projector", "cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF", "projector off.\"\"\" with self.projector() as projector: projector.set_power('off') def turn_on(self): \"\"\"Turn", "= password self._encoding = encoding self._muted = False self._pwstate =", "pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): \"\"\"Set the", "STATE_OFF else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source =", "\"\"\"Return the name of the device.\"\"\" return self._name @property def", "from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): \"\"\"Set", "| \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config,", "setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set up the PJLink platform.\"\"\" host", "port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password", "config, add_entities, discovery_info=None): \"\"\"Set up the PJLink platform.\"\"\" host =", "a PJLink device.\"\"\" def __init__(self, host, port, name, encoding, password):", "name, encoding, password) hass_data[device_label] = device 
add_entities([device], True) def format_input_source(input_source_name,", "as projector: projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute (true) of unmute", "MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): \"\"\"Set the input source.\"\"\"", "features.\"\"\" return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector off.\"\"\" with self.projector()", "refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import", "hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): \"\"\"Format input", "PJLink Projector instance.\"\"\" from pypjlink import Projector projector = Projector.from_address(", "projector.authenticate(self._password) return projector def update(self): \"\"\"Get the latest state from", "projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source): \"\"\"Set the input source.\"\"\" source", "device.\"\"\" self._host = host self._port = port self._name = name", "projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def", "status.\"\"\" return self._muted @property def source(self): \"\"\"Return current input source.\"\"\"", "password self._encoding = encoding self._muted = False self._pwstate = STATE_OFF", "the input source.\"\"\" source = self._source_name_mapping[source] with self.projector() as projector:", "= config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data:", "= logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING =", "off.\"\"\" with self.projector() as projector: projector.set_power('off') def turn_on(self): \"\"\"Turn projector", "logging import voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA,", "= 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = 
PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string,", "= PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING,", "encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in", "config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink']", "\"\"\"Return current input source.\"\"\" return self._current_source @property def source_list(self): \"\"\"Return", "state(self): \"\"\"Return the state of the device.\"\"\" return self._pwstate @property", "device_label in hass_data: return device = PjLinkDevice(host, port, name, encoding,", "port self._name = name self._password = password self._encoding = encoding", "DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT):", "self._port, self._encoding) projector.authenticate(self._password) return projector def update(self): \"\"\"Get the latest", "class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink device.\"\"\" def __init__(self, host,", "projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x): x for x in inputs}", "= host self._port = port self._name = name self._password =", "in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink Projector", "return self._muted @property def source(self): \"\"\"Return current input source.\"\"\" return", "return self._name @property def state(self): \"\"\"Return the state of the", "state from the device.\"\"\" with self.projector() as projector: pwstate =", "device.\"\"\" with self.projector() as projector: pwstate = projector.get_power() if pwstate", "in hass_data: return device = PjLinkDevice(host, port, name, encoding, password)", "import 
homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__)", "source): \"\"\"Set the input source.\"\"\" source = self._source_name_mapping[source] with self.projector()", "with self.projector() as projector: projector.set_power('off') def turn_on(self): \"\"\"Turn projector on.\"\"\"", "def __init__(self, host, port, name, encoding, password): \"\"\"Iinitialize the PJLink", "MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF,", "__init__(self, host, port, name, encoding, password): \"\"\"Iinitialize the PJLink device.\"\"\"", "self._host = host self._port = port self._name = name self._password", "self._source_list @property def supported_features(self): \"\"\"Return projector supported features.\"\"\" return SUPPORT_PJLINK", "on.\"\"\" with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute", "import voluptuous as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE,", "= 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,", "port, name, encoding, password): \"\"\"Iinitialize the PJLink device.\"\"\" self._host =", "== 'off': self._pwstate = STATE_OFF else: self._pwstate = STATE_ON self._muted", "PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING):", "not in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label", "from pypjlink import Projector projector = Projector.from_address( self._host, self._port, self._encoding)", "platform.\"\"\" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME)", "cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): 
cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD):", "= PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device],", "\"\"\"Return the state of the device.\"\"\" return self._pwstate @property def", "indicating mute status.\"\"\" return self._muted @property def source(self): \"\"\"Return current", "False self._pwstate = STATE_OFF self._current_source = None with self.projector() as", "projector: if not self._name: self._name = projector.get_name() inputs = projector.get_inputs()", "\\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities,", "the PJLink protocol. For more details about this platform, please", "homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING", "self._source_name_mapping = \\ {format_input_source(*x): x for x in inputs} self._source_list", "['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352", "of a PJLink device.\"\"\" def __init__(self, host, port, name, encoding,", "the device.\"\"\" with self.projector() as projector: pwstate = projector.get_power() if", "@property def name(self): \"\"\"Return the name of the device.\"\"\" return", "vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string,", "select_source(self, source): \"\"\"Set the input source.\"\"\" source = self._source_name_mapping[source] with", "def state(self): \"\"\"Return the state of the device.\"\"\" return self._pwstate", "format_input_source(input_source_name, input_source_number): \"\"\"Format input source for display in UI.\"\"\" return", "is_volume_muted(self): \"\"\"Return boolean indicating mute status.\"\"\" return 
self._muted @property def", "else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source = \\", "\"\"\"Return projector supported features.\"\"\" return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector", "\\ format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return the name of the", "\"\"\"Format input source for display in UI.\"\"\" return \"{} {}\".format(input_source_name,", "= projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x): x for x in", "default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\", "PJLink protocol. For more details about this platform, please refer", "= \"{}:{}\".format(host, port) if device_label in hass_data: return device =", "self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \\", "inputs = projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x): x for x", "projector supported features.\"\"\" return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector off.\"\"\"", "with self.projector() as projector: if not self._name: self._name = projector.get_name()", "via the PJLink protocol. 
For more details about this platform,", "config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD)", "= projector.get_power() if pwstate == 'off': self._pwstate = STATE_OFF else:", "= \\ format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return the name of", "projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute (true) of unmute (false) media", "def update(self): \"\"\"Get the latest state from the device.\"\"\" with", "\"\"\"Set up the PJLink platform.\"\"\" host = config.get(CONF_HOST) port =", "input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink device.\"\"\" def __init__(self,", "all available input sources.\"\"\" return self._source_list @property def supported_features(self): \"\"\"Return", "config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink' not", "SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD,", "for controlling projector via the PJLink protocol. 
For more details", "PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string,", "self._current_source = \\ format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return the name", "_LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING", "= Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def update(self):", "update(self): \"\"\"Get the latest state from the device.\"\"\" with self.projector()", "hass.data['pjlink'] device_label = \"{}:{}\".format(host, port) if device_label in hass_data: return", "def projector(self): \"\"\"Create PJLink Projector instance.\"\"\" from pypjlink import Projector", "turn_on(self): \"\"\"Turn projector on.\"\"\" with self.projector() as projector: projector.set_power('on') def", "self._muted = False self._pwstate = STATE_OFF self._current_source = None with", "Support for controlling projector via the PJLink protocol. 
For more", "projector: pwstate = projector.get_power() if pwstate == 'off': self._pwstate =", "@property def source_list(self): \"\"\"Return all available input sources.\"\"\" return self._source_list", "def source_list(self): \"\"\"Return all available input sources.\"\"\" return self._source_list @property", "def setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set up the PJLink platform.\"\"\"", "format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return the name of the device.\"\"\"", "source for display in UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number) class", "x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self):", "homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import", "CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv", "device_label = \"{}:{}\".format(host, port) if device_label in hass_data: return device", "logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8'", "as vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON,", "if 'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data =", "@property def state(self): \"\"\"Return the state of the device.\"\"\" return", "None with self.projector() as projector: if not self._name: self._name =", "PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] = device add_entities([device], True)", "}) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF |", "more details about this platform, please refer to the documentation", "password = config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] =", "password): \"\"\"Iinitialize the PJLink device.\"\"\" self._host = host self._port =", 
"port) if device_label in hass_data: return device = PjLinkDevice(host, port,", "vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string,", "of the device.\"\"\" return self._name @property def state(self): \"\"\"Return the", "STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER =", "= sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink Projector instance.\"\"\" from pypjlink", "platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import", "encoding, password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number):", "\"\"\"Mute (true) of unmute (false) media player.\"\"\" with self.projector() as", "turn_off(self): \"\"\"Turn projector off.\"\"\" with self.projector() as projector: projector.set_power('off') def", "PJLink platform.\"\"\" host = config.get(CONF_HOST) port = config.get(CONF_PORT) name =", "= device add_entities([device], True) def format_input_source(input_source_name, input_source_number): \"\"\"Format input source", "= name self._password = password self._encoding = encoding self._muted =", "as projector: if not self._name: self._name = projector.get_name() inputs =", "self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input())", "@property def source(self): \"\"\"Return current input source.\"\"\" return self._current_source @property", "with self.projector() as projector: pwstate = projector.get_power() if pwstate ==", "pwstate = projector.get_power() if pwstate == 'off': self._pwstate = STATE_OFF", "the PJLink device.\"\"\" self._host = host self._port = port self._name", "default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME): 
cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, })", "of the device.\"\"\" return self._pwstate @property def is_volume_muted(self): \"\"\"Return boolean", "input_source_number): \"\"\"Format input source for display in UI.\"\"\" return \"{}", "as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING =", "= 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA =", "about this platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/", "\"\"\"Representation of a PJLink device.\"\"\" def __init__(self, host, port, name,", "self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x):", "= SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def", "source.\"\"\" return self._current_source @property def source_list(self): \"\"\"Return all available input", "return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector off.\"\"\" with self.projector() as", "projector on.\"\"\" with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute):", "the state of the device.\"\"\" return self._pwstate @property def is_volume_muted(self):", "STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER", "up the PJLink platform.\"\"\" host = config.get(CONF_HOST) port = config.get(CONF_PORT)", "password) hass_data[device_label] = device add_entities([device], True) def format_input_source(input_source_name, input_source_number): \"\"\"Format", "hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label = \"{}:{}\".format(host, port)", "\\ {format_input_source(*x): x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys())", "SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector off.\"\"\" with 
self.projector() as projector:", "projector: projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute (true) of unmute (false)", "media player.\"\"\" with self.projector() as projector: from pypjlink import MUTE_AUDIO", "'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port, vol.Optional(CONF_NAME):", "def turn_off(self): \"\"\"Turn projector off.\"\"\" with self.projector() as projector: projector.set_power('off')", "STATE_OFF self._current_source = None with self.projector() as projector: if not", "= STATE_ON self._muted = projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input()) @property", "import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation", "= {} hass_data = hass.data['pjlink'] device_label = \"{}:{}\".format(host, port) if", "def turn_on(self): \"\"\"Turn projector on.\"\"\" with self.projector() as projector: projector.set_power('on')", "device add_entities([device], True) def format_input_source(input_source_name, input_source_number): \"\"\"Format input source for", "def source(self): \"\"\"Return current input source.\"\"\" return self._current_source @property def", "vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON |", "mute) def select_source(self, source): \"\"\"Set the input source.\"\"\" source =", "vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE |", "{} hass_data = hass.data['pjlink'] device_label = \"{}:{}\".format(host, port) if device_label", "vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK =", "\"\"\"Iinitialize the PJLink device.\"\"\" self._host = host self._port = port", "boolean 
indicating mute status.\"\"\" return self._muted @property def source(self): \"\"\"Return", "'off': self._pwstate = STATE_OFF else: self._pwstate = STATE_ON self._muted =", "the documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import voluptuous as", "host, port, name, encoding, password): \"\"\"Iinitialize the PJLink device.\"\"\" self._host", "input sources.\"\"\" return self._source_list @property def supported_features(self): \"\"\"Return projector supported", "mute_volume(self, mute): \"\"\"Mute (true) of unmute (false) media player.\"\"\" with", "mute status.\"\"\" return self._muted @property def source(self): \"\"\"Return current input", "device.\"\"\" return self._name @property def state(self): \"\"\"Return the state of", "\"\"\"Turn projector on.\"\"\" with self.projector() as projector: projector.set_power('on') def mute_volume(self,", "with self.projector() as projector: projector.set_power('on') def mute_volume(self, mute): \"\"\"Mute (true)", "UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a", "def format_input_source(input_source_name, input_source_number): \"\"\"Format input source for display in UI.\"\"\"", "details about this platform, please refer to the documentation at", "return self._pwstate @property def is_volume_muted(self): \"\"\"Return boolean indicating mute status.\"\"\"", "'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({", "\"\"\"Turn projector off.\"\"\" with self.projector() as projector: projector.set_power('off') def turn_on(self):", "current input source.\"\"\" return self._current_source @property def source_list(self): \"\"\"Return all", "= config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] = {}", "projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self, source):", "True) 
def format_input_source(input_source_name, input_source_number): \"\"\"Format input source for display in", "if not self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping", "config.get(CONF_PASSWORD) if 'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data", "CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS", "source_list(self): \"\"\"Return all available input sources.\"\"\" return self._source_list @property def", "name, encoding, password): \"\"\"Iinitialize the PJLink device.\"\"\" self._host = host", "SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT,", "| SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set up the", "projector def update(self): \"\"\"Get the latest state from the device.\"\"\"", "(true) of unmute (false) media player.\"\"\" with self.projector() as projector:", "= encoding self._muted = False self._pwstate = STATE_OFF self._current_source =", "4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend({ vol.Required(CONF_HOST): cv.string, vol.Optional(CONF_PORT,", "| SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set", "self._name = name self._password = password self._encoding = encoding self._muted", "host = config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding", "return self._source_list @property def supported_features(self): \"\"\"Return projector supported features.\"\"\" return", "player.\"\"\" with self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO,", "= config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password = config.get(CONF_PASSWORD) if 'pjlink'", "= STATE_OFF self._current_source = None with self.projector() as 
projector: if", "PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import (", "= port self._name = name self._password = password self._encoding =", "import Projector projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return", "as projector: projector.set_power('off') def turn_on(self): \"\"\"Turn projector on.\"\"\" with self.projector()", "available input sources.\"\"\" return self._source_list @property def supported_features(self): \"\"\"Return projector", "return projector def update(self): \"\"\"Get the latest state from the", "projector.set_power('off') def turn_on(self): \"\"\"Turn projector on.\"\"\" with self.projector() as projector:", "sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink Projector instance.\"\"\" from pypjlink import", "\"\"\"Return boolean indicating mute status.\"\"\" return self._muted @property def source(self):", "import logging import voluptuous as vol from homeassistant.components.media_player import (", "CONF_ENCODING = 'encoding' DEFAULT_PORT = 4352 DEFAULT_ENCODING = 'utf-8' PLATFORM_SCHEMA", "controlling projector via the PJLink protocol. 
For more details about", "\"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink device.\"\"\"", "if pwstate == 'off': self._pwstate = STATE_OFF else: self._pwstate =", "self._current_source @property def source_list(self): \"\"\"Return all available input sources.\"\"\" return", "input source.\"\"\" return self._current_source @property def source_list(self): \"\"\"Return all available", "STATE_ON self._muted = projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input()) @property def", "def mute_volume(self, mute): \"\"\"Mute (true) of unmute (false) media player.\"\"\"", "SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None):", "self._muted @property def source(self): \"\"\"Return current input source.\"\"\" return self._current_source", "= STATE_OFF else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1] self._current_source", "input source.\"\"\" source = self._source_name_mapping[source] with self.projector() as projector: projector.set_input(*source)", "vol from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE,", "= projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x): x", "pypjlink import Projector projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password)", "self._pwstate = STATE_OFF else: self._pwstate = STATE_ON self._muted = projector.get_mute()[1]", "from homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice)", "SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON | SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass,", "to the documentation at 
https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import voluptuous", "PJLink device.\"\"\" self._host = host self._port = port self._name =", "self.projector() as projector: projector.set_power('off') def turn_on(self): \"\"\"Turn projector on.\"\"\" with", "supported_features(self): \"\"\"Return projector supported features.\"\"\" return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn", "PJLink device.\"\"\" def __init__(self, host, port, name, encoding, password): \"\"\"Iinitialize", "self._name @property def state(self): \"\"\"Return the state of the device.\"\"\"", "@property def is_volume_muted(self): \"\"\"Return boolean indicating mute status.\"\"\" return self._muted", "REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding' DEFAULT_PORT", "CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as cv REQUIREMENTS = ['pypjlink2==1.2.0']", "'pjlink' not in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink']", "config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING)", "def is_volume_muted(self): \"\"\"Return boolean indicating mute status.\"\"\" return self._muted @property", "Projector instance.\"\"\" from pypjlink import Projector projector = Projector.from_address( self._host,", "device.\"\"\" return self._pwstate @property def is_volume_muted(self): \"\"\"Return boolean indicating mute", "as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def select_source(self,", "cv.port, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK", "cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE | \\ SUPPORT_TURN_ON", "self._password = password self._encoding = encoding self._muted = False self._pwstate", "unmute (false) 
media player.\"\"\" with self.projector() as projector: from pypjlink", "host self._port = port self._name = name self._password = password", "in hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label =", "def name(self): \"\"\"Return the name of the device.\"\"\" return self._name", "name of the device.\"\"\" return self._name @property def state(self): \"\"\"Return", "projector.get_name() inputs = projector.get_inputs() self._source_name_mapping = \\ {format_input_source(*x): x for", "in UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of", "For more details about this platform, please refer to the", "import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const", "homeassistant.components.media_player import ( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from", "self._muted = projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input()) @property def name(self):", "for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create", "the device.\"\"\" return self._name @property def state(self): \"\"\"Return the state", "input source for display in UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number)", "at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import voluptuous as vol from", "pwstate == 'off': self._pwstate = STATE_OFF else: self._pwstate = STATE_ON", "as projector: pwstate = projector.get_power() if pwstate == 'off': self._pwstate", "( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON) import homeassistant.helpers.config_validation as", "add_entities, discovery_info=None): \"\"\"Set up the PJLink platform.\"\"\" host = config.get(CONF_HOST)", 
"this platform, please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\"", "PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink device.\"\"\" def __init__(self, host, port,", "add_entities([device], True) def format_input_source(input_source_name, input_source_number): \"\"\"Format input source for display", "self._pwstate = STATE_OFF self._current_source = None with self.projector() as projector:", "hass_data: return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label]", "port, name, encoding, password) hass_data[device_label] = device add_entities([device], True) def", "hass_data = hass.data['pjlink'] device_label = \"{}:{}\".format(host, port) if device_label in", "name(self): \"\"\"Return the name of the device.\"\"\" return self._name @property", "the latest state from the device.\"\"\" with self.projector() as projector:", "latest state from the device.\"\"\" with self.projector() as projector: pwstate", "@property def supported_features(self): \"\"\"Return projector supported features.\"\"\" return SUPPORT_PJLINK def", "projector via the PJLink protocol. 
For more details about this", "display in UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation", "not self._name: self._name = projector.get_name() inputs = projector.get_inputs() self._source_name_mapping =", "please refer to the documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging", "return self._current_source @property def source_list(self): \"\"\"Return all available input sources.\"\"\"", "SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST, CONF_NAME,", "self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute) def", "{}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink device.\"\"\" def", "device.\"\"\" def __init__(self, host, port, name, encoding, password): \"\"\"Iinitialize the", "= config.get(CONF_HOST) port = config.get(CONF_PORT) name = config.get(CONF_NAME) encoding =", "\"\"\" Support for controlling projector via the PJLink protocol. 
For", "x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink", "cv.string, vol.Optional(CONF_ENCODING, default=DEFAULT_ENCODING): cv.string, vol.Optional(CONF_PASSWORD): cv.string, }) SUPPORT_PJLINK = SUPPORT_VOLUME_MUTE", "for display in UI.\"\"\" return \"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice):", "( PLATFORM_SCHEMA, SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import", "inputs} self._source_list = sorted(self._source_name_mapping.keys()) def projector(self): \"\"\"Create PJLink Projector instance.\"\"\"", "self.projector() as projector: pwstate = projector.get_power() if pwstate == 'off':", "\"\"\" import logging import voluptuous as vol from homeassistant.components.media_player import", "if device_label in hass_data: return device = PjLinkDevice(host, port, name,", "def supported_features(self): \"\"\"Return projector supported features.\"\"\" return SUPPORT_PJLINK def turn_off(self):", "\"\"\"Return all available input sources.\"\"\" return self._source_list @property def supported_features(self):", "cv REQUIREMENTS = ['pypjlink2==1.2.0'] _LOGGER = logging.getLogger(__name__) CONF_ENCODING = 'encoding'", "SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set up the PJLink", "https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import voluptuous as vol from homeassistant.components.media_player", "= hass.data['pjlink'] device_label = \"{}:{}\".format(host, port) if device_label in hass_data:", "from homeassistant.const import ( CONF_HOST, CONF_NAME, CONF_PASSWORD, CONF_PORT, STATE_OFF, STATE_ON)", "def select_source(self, source): \"\"\"Set the input source.\"\"\" source = self._source_name_mapping[source]", "(false) media player.\"\"\" with self.projector() as projector: from pypjlink import", 
"SUPPORT_TURN_OFF | SUPPORT_SELECT_SOURCE def setup_platform(hass, config, add_entities, discovery_info=None): \"\"\"Set up", "= config.get(CONF_PORT) name = config.get(CONF_NAME) encoding = config.get(CONF_ENCODING) password =", "state of the device.\"\"\" return self._pwstate @property def is_volume_muted(self): \"\"\"Return", "projector.get_power() if pwstate == 'off': self._pwstate = STATE_OFF else: self._pwstate", "self._host, self._port, self._encoding) projector.authenticate(self._password) return projector def update(self): \"\"\"Get the", "documentation at https://home-assistant.io/components/media_player.pjlink/ \"\"\" import logging import voluptuous as vol", "mute): \"\"\"Mute (true) of unmute (false) media player.\"\"\" with self.projector()", "Projector projector = Projector.from_address( self._host, self._port, self._encoding) projector.authenticate(self._password) return projector", "with self.projector() as projector: from pypjlink import MUTE_AUDIO projector.set_mute(MUTE_AUDIO, mute)", "self._pwstate @property def is_volume_muted(self): \"\"\"Return boolean indicating mute status.\"\"\" return", "\"\"\"Create PJLink Projector instance.\"\"\" from pypjlink import Projector projector =", "{format_input_source(*x): x for x in inputs} self._source_list = sorted(self._source_name_mapping.keys()) def", "= projector.get_mute()[1] self._current_source = \\ format_input_source(*projector.get_input()) @property def name(self): \"\"\"Return", "from the device.\"\"\" with self.projector() as projector: pwstate = projector.get_power()", "hass.data: hass.data['pjlink'] = {} hass_data = hass.data['pjlink'] device_label = \"{}:{}\".format(host,", "discovery_info=None): \"\"\"Set up the PJLink platform.\"\"\" host = config.get(CONF_HOST) port", "projector: projector.set_power('off') def turn_on(self): \"\"\"Turn projector on.\"\"\" with self.projector() as", "sources.\"\"\" return self._source_list @property def supported_features(self): \"\"\"Return projector 
supported features.\"\"\"", "encoding self._muted = False self._pwstate = STATE_OFF self._current_source = None", "protocol. For more details about this platform, please refer to", "supported features.\"\"\" return SUPPORT_PJLINK def turn_off(self): \"\"\"Turn projector off.\"\"\" with", "\"\"\"Set the input source.\"\"\" source = self._source_name_mapping[source] with self.projector() as", "= False self._pwstate = STATE_OFF self._current_source = None with self.projector()", "SUPPORT_SELECT_SOURCE, SUPPORT_TURN_OFF, SUPPORT_TURN_ON, SUPPORT_VOLUME_MUTE, MediaPlayerDevice) from homeassistant.const import ( CONF_HOST,", "self.projector() as projector: if not self._name: self._name = projector.get_name() inputs", "\"{}:{}\".format(host, port) if device_label in hass_data: return device = PjLinkDevice(host,", "return device = PjLinkDevice(host, port, name, encoding, password) hass_data[device_label] =", "self._port = port self._name = name self._password = password self._encoding", "return \"{} {}\".format(input_source_name, input_source_number) class PjLinkDevice(MediaPlayerDevice): \"\"\"Representation of a PJLink", "instance.\"\"\" from pypjlink import Projector projector = Projector.from_address( self._host, self._port," ]
[ "i < n_s-1: i = i+ 1 if j >=", "Hard def isMatch(s: str, p: str) -> bool: if not", "j<n_p-1: j += 2 else: return False return True if", "not s n_s = len(s) n_p = len(p) j =", "s[i]==s[i-1]: i += 1 j += 1 if p[j] ==", "p[j] and j<n_p-1: j += 2 else: return False return", "return False if p[j] == '*': while s[i]==s[i-1]: i +=", "or s[i] == p[j]: j += 1 # continue elif", "1 # continue elif s[i] != p[j] and j<n_p-1: j", "if p[j] == '.' or s[i] == p[j]: j +=", "!= p[j] and j<n_p-1: j += 2 else: return False", "s[i] != p[j] and j<n_p-1: j += 2 else: return", "= len(s) n_p = len(p) j = 0 i =", "j += 1 if p[j] == '.' or s[i] ==", "Level: Hard def isMatch(s: str, p: str) -> bool: if", "if p[j] == '*': while s[i]==s[i-1]: i += 1 j", "p[j] == '*': while s[i]==s[i-1]: i += 1 j +=", "n_p: return False if p[j] == '*': while s[i]==s[i-1]: i", "+= 1 j += 1 if p[j] == '.' or", "str) -> bool: if not p: return not s n_s", "False return True if __name__ == \"__main__\": ss = 'abbbbbc'", "j += 2 else: return False return True if __name__", "== '*': while s[i]==s[i-1]: i += 1 j += 1", "== p[j]: j += 1 # continue elif s[i] !=", "-> bool: if not p: return not s n_s =", "n_s-1: i = i+ 1 if j >= n_p: return", "< n_s-1: i = i+ 1 if j >= n_p:", "len(s) n_p = len(p) j = 0 i = -1", "= -1 while i < n_s-1: i = i+ 1", "return False return True if __name__ == \"__main__\": ss =", "i+ 1 if j >= n_p: return False if p[j]", "p: return not s n_s = len(s) n_p = len(p)", "i = i+ 1 if j >= n_p: return False", "continue elif s[i] != p[j] and j<n_p-1: j += 2", "= len(p) j = 0 i = -1 while i", "2 else: return False return True if __name__ == \"__main__\":", "bool: if not p: return not s n_s = len(s)", "j >= n_p: return False if p[j] == '*': while", "i += 1 j += 1 if p[j] == '.'", "isMatch(s: str, p: str) -> bool: if not p: return", "else: return False return True if __name__ == \"__main__\": ss", "if __name__ == \"__main__\": ss = 'abbbbbc' p = 'a*'", "0 i = -1 while i < n_s-1: i =", "return not s n_s = 
len(s) n_p = len(p) j", "# Level: Hard def isMatch(s: str, p: str) -> bool:", "False if p[j] == '*': while s[i]==s[i-1]: i += 1", "while i < n_s-1: i = i+ 1 if j", "s n_s = len(s) n_p = len(p) j = 0", "__name__ == \"__main__\": ss = 'abbbbbc' p = 'a*' print(isMatch(ss,", "n_p = len(p) j = 0 i = -1 while", "return True if __name__ == \"__main__\": ss = 'abbbbbc' p", "1 j += 1 if p[j] == '.' or s[i]", "while s[i]==s[i-1]: i += 1 j += 1 if p[j]", "True if __name__ == \"__main__\": ss = 'abbbbbc' p =", "== \"__main__\": ss = 'abbbbbc' p = 'a*' print(isMatch(ss, p))", "= 0 i = -1 while i < n_s-1: i", "p[j] == '.' or s[i] == p[j]: j += 1", "# continue elif s[i] != p[j] and j<n_p-1: j +=", "if j >= n_p: return False if p[j] == '*':", "p[j]: j += 1 # continue elif s[i] != p[j]", "= i+ 1 if j >= n_p: return False if", "def isMatch(s: str, p: str) -> bool: if not p:", "<reponame>Kaushalya/algo_journal # Level: Hard def isMatch(s: str, p: str) ->", "+= 1 if p[j] == '.' or s[i] == p[j]:", "i = -1 while i < n_s-1: i = i+", "+= 1 # continue elif s[i] != p[j] and j<n_p-1:", "and j<n_p-1: j += 2 else: return False return True", "'.' or s[i] == p[j]: j += 1 # continue", "j += 1 # continue elif s[i] != p[j] and", "if not p: return not s n_s = len(s) n_p", "'*': while s[i]==s[i-1]: i += 1 j += 1 if", "1 if p[j] == '.' or s[i] == p[j]: j", "p: str) -> bool: if not p: return not s", "not p: return not s n_s = len(s) n_p =", "+= 2 else: return False return True if __name__ ==", "n_s = len(s) n_p = len(p) j = 0 i", "== '.' or s[i] == p[j]: j += 1 #", "j = 0 i = -1 while i < n_s-1:", ">= n_p: return False if p[j] == '*': while s[i]==s[i-1]:", "str, p: str) -> bool: if not p: return not", "1 if j >= n_p: return False if p[j] ==", "-1 while i < n_s-1: i = i+ 1 if", "elif s[i] != p[j] and j<n_p-1: j += 2 else:", "s[i] == p[j]: j += 1 # continue elif s[i]", "len(p) j = 0 i = -1 while i <" ]
[ "u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self,", "of groups were passed in, use them for perm in", "class Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda", "in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule", "extracted: self.raw = extracted class Meta: model = Page class", "passed in, use them for user in extracted: self.user_set.add(user) class", "@factory.post_generation def groups(self, create, extracted, **kwargs): if not create: #", "name = factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n:", "users(self, create, extracted, **kwargs): if not create: # Simple build,", "model = Group name = factory.Sequence(lambda n: \"Group #%s\" %", "def users(self, create, extracted, **kwargs): if not create: # Simple", "n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def", "in extracted: if not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm)", "ACLRule name = factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda", "factory.Sequence(lambda n: \"Group #%s\" % n) @factory.post_generation def users(self, create,", "extracted: self.users.add(user) @factory.post_generation def groups(self, create, extracted, **kwargs): if not", "do nothing. 
return if extracted: self.raw = extracted class Meta:", "= Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug", "email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username) class Meta: model", "for group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda", "password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda o: <EMAIL>' %", "= factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted, **kwargs):", "class Meta: model = Group name = factory.Sequence(lambda n: \"Group", "class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group name = factory.Sequence(lambda", "factory.Sequence(lambda n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation", "GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group name = factory.Sequence(lambda n:", "in, use them for perm in extracted: if not isinstance(perm,", "perm in extracted: if not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki',", "for user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model", "if extracted: # A list of groups were passed in,", "self.raw = extracted class Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory):", "were passed in, use them for group in extracted: self.groups.add(group)", "self.users.add(user) @factory.post_generation def groups(self, create, extracted, **kwargs): if not create:", "list of groups were passed in, use them for perm", "factory from django.contrib.auth.models import User, Group, Permission from waliki.models import", "Group name = factory.Sequence(lambda n: \"Group #%s\" % n) @factory.post_generation", 
"them for perm in extracted: if not isinstance(perm, Permission): perm", "Group, Permission from waliki.models import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory):", "n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class Meta: model", "n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted, **kwargs): if not", "list of groups were passed in, use them for user", "use them for user in extracted: self.users.add(user) @factory.post_generation def groups(self,", "Meta: model = Group name = factory.Sequence(lambda n: \"Group #%s\"", "new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class Meta: model = Redirect", "Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create,", "groups were passed in, use them for perm in extracted:", "import User, Group, Permission from waliki.models import ACLRule, Page, Redirect", "Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n)) password =", "in, use them for user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory):", "# A list of groups were passed in, use them", "raw(self, create, extracted, **kwargs): if not create: # Simple build,", "= factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username)", "u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class Meta: model =", "title = factory.Sequence(lambda n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda n:", "create: # Simple build, do nothing. 
return if extracted: #", "return if extracted: self.raw = extracted class Meta: model =", "of groups were passed in, use them for group in", "in, use them for group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory):", "class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password',", "factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted, **kwargs): if", "group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model =", "perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create, extracted,", "passed in, use them for user in extracted: self.users.add(user) @factory.post_generation", "self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group name =", "self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page {0}'.format(n)) slug", "user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model =", "u'Page {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self,", "= factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class", "o.username) class Meta: model = User @factory.post_generation def groups(self, create,", "if not create: # Simple build, do nothing. 
return if", "name = factory.Sequence(lambda n: \"Group #%s\" % n) @factory.post_generation def", "class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda", "extracted, **kwargs): if not create: # Simple build, do nothing.", "them for user in extracted: self.users.add(user) @factory.post_generation def groups(self, create,", "Permission from waliki.models import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username", "n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def", "build, do nothing. return if extracted: # A list of", "groups were passed in, use them for group in extracted:", "them for group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta:", "use them for user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class", "u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted, **kwargs): if not create:", "u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted, **kwargs): if not create:", "nothing. return if extracted: # A list of groups were", "**kwargs): if not create: # Simple build, do nothing. 
return", "factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username) class", "passed in, use them for group in extracted: self.groups.add(group) class", "n: \"Group #%s\" % n) @factory.post_generation def users(self, create, extracted,", "passed in, use them for perm in extracted: if not", "of groups were passed in, use them for user in", "@factory.post_generation def raw(self, create, extracted, **kwargs): if not create: #", "class Meta: model = ACLRule name = factory.Sequence(lambda n: u'Rule", "= factory.Sequence(lambda n: \"Group #%s\" % n) @factory.post_generation def users(self,", "waliki.models import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda", "extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group name", "slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted,", "class Meta: model = User @factory.post_generation def groups(self, create, extracted,", "in extracted: self.users.add(user) @factory.post_generation def groups(self, create, extracted, **kwargs): if", "PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda", "= Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create, extracted, **kwargs):", "were passed in, use them for user in extracted: self.users.add(user)", "def raw(self, create, extracted, **kwargs): if not create: # Simple", "extracted class Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug =", "Simple build, do nothing. 
return if extracted: # A list", "extracted: if not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm)", "<reponame>luzik/waliki<filename>tests/factories.py import factory from django.contrib.auth.models import User, Group, Permission from", "@factory.post_generation def users(self, create, extracted, **kwargs): if not create: #", "them for user in extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta:", "Simple build, do nothing. return if extracted: self.raw = extracted", "use them for group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title", "model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n))", "#%s\" % n) @factory.post_generation def users(self, create, extracted, **kwargs): if", "= factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n))", "\"Group #%s\" % n) @factory.post_generation def users(self, create, extracted, **kwargs):", "in, use them for user in extracted: self.users.add(user) @factory.post_generation def", "n) @factory.post_generation def users(self, create, extracted, **kwargs): if not create:", "if extracted: self.raw = extracted class Meta: model = Page", "groups(self, create, extracted, **kwargs): if not create: # Simple build,", "not create: # Simple build, do nothing. 
return if extracted:", "in, use them for group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory):", "'<PASSWORD>') email = factory.LazyAttribute(lambda o: <EMAIL>' % o.username) class Meta:", "in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model = Group", "= factory.Sequence(lambda n: u'Page {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n))", "Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create, extracted, **kwargs): if", "% n) @factory.post_generation def users(self, create, extracted, **kwargs): if not", "extracted: # A list of groups were passed in, use", "create, extracted, **kwargs): if not create: # Simple build, do", "factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n)) class Meta:", "factory.LazyAttribute(lambda o: <EMAIL>' % o.username) class Meta: model = User", "{0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create,", "model = ACLRule name = factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug", "in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page", "groups were passed in, use them for user in extracted:", "n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted, **kwargs): if not", "nothing. 
return if extracted: self.raw = extracted class Meta: model", "permissions(self, create, extracted, **kwargs): if not create: # Simple build,", "ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule name = factory.Sequence(lambda n:", "list of groups were passed in, use them for group", "Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n)) password", "User, Group, Permission from waliki.models import ACLRule, Page, Redirect class", "A list of groups were passed in, use them for", "old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n: u'new-page{0}'.format(n))", "= factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email =", "isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def users(self,", "Meta: model = ACLRule name = factory.Sequence(lambda n: u'Rule {0}'.format(n))", "for group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class Meta: model", "slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted,", "<EMAIL>' % o.username) class Meta: model = User @factory.post_generation def", "{0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create,", "use them for group in extracted: self.groups.add(group) class GroupFactory(factory.django.DjangoModelFactory): class", "do nothing. 
return if extracted: # A list of groups", "self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule name =", "= extracted class Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug", "RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug = factory.Sequence(lambda n:", "@factory.post_generation def permissions(self, create, extracted, **kwargs): if not create: #", "factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug = factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation", "from waliki.models import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username =", "= ACLRule name = factory.Sequence(lambda n: u'Rule {0}'.format(n)) slug =", "def groups(self, create, extracted, **kwargs): if not create: # Simple", "factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda", "were passed in, use them for perm in extracted: if", "def permissions(self, create, extracted, **kwargs): if not create: # Simple", "UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>')", "n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda o:", "# Simple build, do nothing. return if extracted: self.raw =", "if not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation", "not isinstance(perm, Permission): perm = Permission.objects.get(content_type__app_label='waliki', codename=perm) self.permissions.add(perm) @factory.post_generation def", "build, do nothing. 
return if extracted: self.raw = extracted class", "username = factory.Sequence(lambda n: u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email", "= factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def permissions(self, create, extracted, **kwargs):", "use them for perm in extracted: if not isinstance(perm, Permission):", "User @factory.post_generation def groups(self, create, extracted, **kwargs): if not create:", "group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n:", "django.contrib.auth.models import User, Group, Permission from waliki.models import ACLRule, Page,", "o: <EMAIL>' % o.username) class Meta: model = User @factory.post_generation", "= User @factory.post_generation def groups(self, create, extracted, **kwargs): if not", "# Simple build, do nothing. return if extracted: # A", "extracted: self.user_set.add(user) class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule name", "them for group in extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title =", "for user in extracted: self.users.add(user) @factory.post_generation def groups(self, create, extracted,", "import factory from django.contrib.auth.models import User, Group, Permission from waliki.models", "for perm in extracted: if not isinstance(perm, Permission): perm =", "= factory.LazyAttribute(lambda o: <EMAIL>' % o.username) class Meta: model =", "return if extracted: # A list of groups were passed", "class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page {0}'.format(n)) slug =", "factory.Sequence(lambda n: u'page{0}'.format(n)) @factory.post_generation def raw(self, create, extracted, **kwargs): if", "import ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n:", "user in 
extracted: self.users.add(user) @factory.post_generation def groups(self, create, extracted, **kwargs):", "create: # Simple build, do nothing. return if extracted: self.raw", "from django.contrib.auth.models import User, Group, Permission from waliki.models import ACLRule,", "class ACLRuleFactory(factory.django.DjangoModelFactory): class Meta: model = ACLRule name = factory.Sequence(lambda", "extracted: self.groups.add(group) class PageFactory(factory.django.DjangoModelFactory): title = factory.Sequence(lambda n: u'Page {0}'.format(n))", "model = User @factory.post_generation def groups(self, create, extracted, **kwargs): if", "% o.username) class Meta: model = User @factory.post_generation def groups(self,", "Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n: u'old-page{0}'.format(n)) new_slug =", "ACLRule, Page, Redirect class UserFactory(factory.django.DjangoModelFactory): username = factory.Sequence(lambda n: u'user{0}'.format(n))", "were passed in, use them for user in extracted: self.user_set.add(user)", "Meta: model = User @factory.post_generation def groups(self, create, extracted, **kwargs):", "codename=perm) self.permissions.add(perm) @factory.post_generation def users(self, create, extracted, **kwargs): if not", "Meta: model = Page class RedirectFactory(factory.django.DjangoModelFactory): old_slug = factory.Sequence(lambda n:", "= Group name = factory.Sequence(lambda n: \"Group #%s\" % n)", "self.permissions.add(perm) @factory.post_generation def users(self, create, extracted, **kwargs): if not create:", "u'user{0}'.format(n)) password = factory.PostGenerationMethodCall('set_password', '<PASSWORD>') email = factory.LazyAttribute(lambda o: <EMAIL>'" ]
[ "self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.real_path", "= {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes =", "self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def", "if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node)", "self.node_paths: node = layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if top_node", "def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input on {}\".format(self.node_path)) class", "layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state()", "new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0] +", "for nn_p, n in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node", "the model marks a layer as saved we reset the", "self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection =", "= self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at -", "\"\"\"Set node comment\"\"\" def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment,", "getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent,", "after saving it. :param layer_just_saved: string of layer real path", "be a bug? 
We don't touch the top layer in", "only set one attr's data at a # time and", "in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str", "in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if", "for parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path in nodes_dict.items():", "= self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths:", "and then undo was called, thus this redo has a", "is called after a layer is saved. :param layer_path: string", "self.data = data self.parent_path = parent_path self.layer_path = layer_path self.stage", "self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize", "self.setText(\"Revert instance path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path,", "for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is", "= [self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data = {}", "= node_path self.attr_name = attr_name self.new_attr_name = new_attr_name self.model =", "{} for old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node", "get the node node = layer.lookup(self.node_path) dirties = [self.node_path] if", "attr is deleted self.remove_attr = True super(DeleteAttribute, self).undo() layer =", "for new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path)", "self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path)", "= 
self.model.remove_attr_display_state(old_node_path) if attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state) #", "= node_path def undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove our", "model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self):", "once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce", "model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {}", "layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data in self.prev_node_data.items():", "a given layer, if the layer is not a top", "None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n", "def undo(self): layer = self.model.lookup_layer(self.layer_path) if not self.value: func =", "def redo(self): delta_str = None layer = self.model.lookup_layer(self.layer_path) for node_path,", "self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for node,", "= [ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes += [node] #", "effected (unsaved) layers. If this command was the first to", "14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1 self.model.update_comp_layer(rebuild=True)", "layer from the model's set of effected (unsaved) layers. 
If", "self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer)", "(name) :param layer_path: real path of layer :param model: StageModel", "return self.setText(\"Set {} exec input to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand):", "it's # original top node. node_hierarchy_data = {} if self.parent_node_path", "= layer.lookup(self.node_path) dirties = [self.node_path] if node is None: parent_path", "= idx self.file_path = file_path self.file_name = file_name self.chdir =", "layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute,", "if the layer is not a top layer the top", "node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not", "If this command was the first to effect the layer", "'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this graph\"\"\"", "sources\"\"\" def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN,", "path in paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection,", "- start) * 1000))) logger.debug(cmd.text() + \" | \" +", "= True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path]", "from nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def", "self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created nodes for", "val = self.data.get(META_ATTRS.VALUE) if val is None: self.setText(\"Removed 
exec input", "self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display'] = attr_display #", "node_path, model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path)", "layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent'] #", "child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] = node_data", "model): super(SetSelection, self).__init__() self.new_paths = paths self.model = model self.prev_paths", "on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\" def __init__(self,", "super(AddAttribute, self).redo() self.remove_attr = True self.setText(\"Add {} attr to {}\".format(self.attr_name,", "**self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name", "self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self):", "= model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self):", "= '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str =", "layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path = layer_path self.prev_breaks", "n is not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True)", "= self.prev_selection # Fixme: Does not account for rebuilding proxy", "layer.ancestors(node_path) if ancestors: ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order", "for a given layer\"\"\" def __init__(self, model, layer_path): super(ClearBreakpoints, 
self).__init__()", "= self.model.selection # resulting node self.node_path = None self.created_node_paths =", "super(SetNodeComment, self).redo() self.setText(\"Changed comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node", "self.model.top_layer.positions[self.node_path] = pos # This might be a bug? We", ":param other_removed_nodes: list of node paths that will be deleted", "effects the layer not the redo eff_by_redo = False eff_by_undo", ":return: None \"\"\" layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo", "= self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes", "name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor:", "be # named what it was named when it was", "* 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value", "self.setText(\"Rename {} to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\"", "None self.model = model self.stage = model.stage self.insert_idx = idx", "effected (unsaved) layers. 
If the layer is not marked as", "super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths =", "model marks a layer as saved we reset the class", "= color.lower() open_layer_colors += [color] layer_color = layer_color_index[0] for c", "layer_path self.new_positions = node_positions self.old_positions = {} for path in", "= {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path) def", "for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = []", "node_path, model, layer_path): comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path) code_lines", "color to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage =", "getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order)", "parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break =", "redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue):", "# resulting nodes self.new_node_paths = [] @processing def undo(self): target_layer", "top_node is None: top_node = node top_node_path = layer.get_node_path(top_node) top_node_descendant_list", "layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class", "= other_removed_nodes @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer =", "attr_state) # set position for un-parent if self.parent_node_path == 
nxt_path.WORLD:", "model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path = layer_path", "{}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def __init__(self, node_path, instance_path,", "in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items():", "self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand):", "data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path)", "(INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed =", "self._layers_effected_by_me = {} def _get_effects(self, layer_path): \"\"\"Gets the effected state", "paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add {}", "# resulting nodes self.node_path_data = {} self.new_node_paths = [] self.created_node_paths", "node path :param model: StageModel :param layer_path: String of layer", "\"\"\"Add an attribute to a node.\"\"\" def __init__(self, node_path, attr_name,", "= time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties", "to insure attr is deleted self.remove_attr = True super(DeleteAttribute, self).undo()", "'{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str = node_path", "def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx,", "model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path 
self.parent_node = None self.model", "= model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE in", "self).__init__(model) self.layer_path = layer_path self.stage = model.stage # get undo", "new_nodes, new_paths, dirty = _add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1]", "data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0])", "the top layer in redo... self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if", "nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer", "@processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes", "nodes for node_path in self.new_node_paths: n = target_layer.lookup(node_path) if n", "model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text", "marked as undo effects the # layer, meaning the layer", "inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path", "self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next", "self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths =", "# restore name prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME)", "self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue):", "False self.created_node_paths = [] 
# get the node node =", "if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if", "layer) for node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] =", "in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path =", "self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] =", "self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node", "layer.color if color: color = color.lower() open_layer_colors += [color] layer_color", "if eff_by_undo: # This command has already been marked as", "self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in", "self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, attr_name,", "redo(self): new_selection = [] self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path)", "__init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model,", "self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path", "self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name']", "layer_path: string of layer real path :return: (bool, bool) |", "model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos", "class RevertNode(DeleteNode): \"\"\"Localize 
nodes\"\"\" def __init__(self, node_path, model, layer_path, others):", "self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None", "txt = (\"Set inst path on \" \"{} to {}\".format(self.node_path,", "\"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd =", "new_paths = curr_selection + paths super(AddSelection, self).__init__(new_paths, model) def redo(self):", "= {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was", "{} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing layer\"\"\" def", ":param color: string of new layer alias (name) :param layer_path:", "= color self.old_color = '' self.model = model self.stage =", "pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): \"\"\"Add", "command class that can be used to # set multiple", "pos, model, layer_path): super(AddNode, self).__init__(model) self.name = name self.data =", "is not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection", "= self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = [] nodes, dirty", "= new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path) # preserve original", "= self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else: func =", "if node is None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path)", "self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path in self.new_node_paths: n", "comp_layer=comp_layer) for nn_p, n in new_node_table: display_node = 
comp_layer.lookup(nn_p) if", "setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does not", "fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute", "model, layer_path, others) self.rebuild = False # Tells the delete", "= layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH)", "= [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self,", "node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self):", "node selection for undo self.prev_selection = self.model.selection # get previous", "self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set", "on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def __init__(self, alias,", "redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color =", "\"\"\" layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path)", "layer. 
It is important to note that the other_removed_nodes list", "redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {}", "self.parent_node_path = parent_node_path self.parent_node = None self.model = model self.stage", "\"\"\"Select Nodes and Connections\"\"\" def __init__(self, paths, model): super(SetSelection, self).__init__()", "def __init__(self, node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path,", "undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes =", "self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.node_path_data", "comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if", "eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def", "to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a node\"\"\"", "We don't want to fix names because we know this", "!= node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0] + 20,", "exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def", "from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self, node_paths, model):", "super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force']", "dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def", "in self.created_node_paths: node = layer.lookup(node_path) if node 
is not None:", "layer_path: String of layer realpath :param other_removed_nodes: list of node", "ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor", "self.node_path = self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes())", "new_attr_name) self.setText(\"Rename {} to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute", "command not to re-comp self.created_node_paths = [] self.node_path = node_path", "node instance\"\"\" def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path,", "an action after saving it. :param layer_just_saved: string of layer", "def __init__(self, node_path, name, model, layer_path): self.old_node_path = node_path layer", "code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path)", "{} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths", "= node_paths self.model = model self.stage = model.stage self.prev_selection =", "self.remove_attr = False self.created_node_paths = [] # get the node", "KeyError: # Removed by a save action pass def undo_effected_layer(self,", "= layer_path self.model = model self.layer_paths = [] def undo(self):", "target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name,", "@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] #", "if node is not None: # delete node _, dirty", "= self.attr_name _, dirties = self.stage.add_node(name=name, 
data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp,", "super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move", "[] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def", "= data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers)", "(GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx +=", "model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path)", "delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\" def __init__(self,", "the color for a given layer, if the layer is", "fix_names=False) # Fixme: Targeted parenting would avoid the need for", "dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,)", "sure the layer is properly marked as unsaved even if", "child_order = all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes',", "again. 
eff_by_redo = True eff_by_undo = False else: # Now", "self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) #", "= new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str = self.node_paths[0]", "color: color = color.lower() open_layer_colors += [color] layer_color = layer_color_index[0]", "layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if", "= user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path) # get", "self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)}", "comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data)", "node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data: return # parent self.node_path_data", "= layer.ancestors(node_path) if ancestors: ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor)", "self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class", "def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): # TODO: We", "old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name prev_name", "= not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) 
self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {}", "= self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts =", "effects it eff_by_redo = False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path)", "model.stage # get undo data self.prev_selection = self.model.selection self.prev_starts =", "self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt", "SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as the execution start point\"\"\" def", "} layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2 lines", "eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return", "for attr in local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node,", "idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text =", "closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor", "attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}}", "{}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def __init__(self, layer_path, model):", "layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute,", "0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1]", "+= [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def 
redo(self):", "in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer,", "renamed to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this", "to False since now its been saved & the undo", "= self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path != node_path:", "self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): \"\"\"Add a", "def redo(self): self.setText(\"Revert exec input on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize", "instance\"\"\" def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH,", "parent_node_path self.parent_node = None self.model = model self.stage = model.stage", "= comp_layer.lookup(nn_p) if display_node is not None: display_child_order = getattr(display_node,", "self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else:", "letting it set text once, relying on consistent delta. x_delta", "is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for", "if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This command has", "as effected. 
This case happens when undo is called after", "remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if self.node_path in", "open_layer_colors: layer_color = c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data", "proxy nodes for the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path)", "node_path in self.new_node_paths: n = target_layer.lookup(node_path) if n is not", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path)", "= nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path)", "= self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty = _add_node_hierarchy(node_path,", "model self.stage = model.stage self.layer_path = layer_path @processing def undo(self):", "is self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing", "command has already been marked as undo effects the #", "super(LocalizeCompute, self).redo() self.setText(\"Localize compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self,", "self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def redo(self): layer =", "\"\"\"Set node code value\"\"\" def __init__(self, node_path, code_lines, model, layer_path):", "called after a layer is saved. 
:param layer_path: string of", "layer_path): \"\"\"Gets the effected state for a given layer with", "layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False)", "self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data =", "data, model, layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr = True", "ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER,", "path is placed in a list of descendants for each", "= self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path,", "def __init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value,", "layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in", "dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node =", "\"\"\" first_eff_by_undo = False first_eff_by_redo = False try: first_eff_by_undo =", "True if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True,", "attr `_first_effected_by_redo` to False. 
This makes sure the layer is", "{node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if", "exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def", "1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText(\"Parent {}", "undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items():", "self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes", "for rebuilding proxy nodes for the dirty nodes dirty_set =", "layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path]", "closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path =", "self.prev_node_data[old_path] # restore name prev_name = node_data['name'] name = getattr(node,", "self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx +=", "a save action pass def undo_effected_layer(self, layer_path): \"\"\"Removes layer from", "if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks", "def redo(self): super(AddAttribute, self).redo() self.remove_attr = True self.setText(\"Add {} attr", "if node is not None: _, dirty = self.stage.delete_node(node, layer,", "INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\"", "for p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty =", "= 
layer_path self.created_node_paths = [] self.remove_attr = False self.prev_data =", "The way it works now we can only set one", "[] layer = self.model.target_layer for node_path in self.node_paths: node_data =", "= self.model.comp_layer parent = self.node_data['parent'] # We don't want to", "= self.prev_selection # undo_debug(self, start) @processing def redo(self): start =", "= self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str))", "child_order = [] if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else:", "model.comp_layer new_node_paths = [] new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path)", "old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {}", "def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order,", "node enabled state\"\"\" def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState,", "rm_layer_data = True else: rm_layer_data = False for p in", "- prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) ==", "point\"\"\" def __init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT,", "attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue):", "model) def redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class", "def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path)", "node. 
node_hierarchy_data = {} if self.parent_node_path is nxt_path.WORLD: for node_path", "redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path,", "idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage =", "self.new_attr_name = new_attr_name self.model = model self.stage = model.stage self.layer_path", "self.setText(\"Set {} alias to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add new", "self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer)", "__init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model,", "self).__init__(model) self.layer_path = layer_path self.alias = alias self.old_alias = ''", "a layer is saved. :param layer_path: string of layer real", "return # parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths =", "self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx =", "self.rebuild = False # Tells the delete command not to", "rm_layer_data = True else: rm_layer_data = False comp_layer = self.model.comp_layer", "redo(self): self.prev_node_data = {} self.node_path_data = {} self.new_node_paths = []", "0: rm_layer_data = True else: rm_layer_data = False comp_layer =", "nodes_str = node_path else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str,", "node_path self.attr_name = attr_name self.new_attr_name = new_attr_name self.model = model", "super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color = color self.old_color =", "soloing an existing layer\"\"\" def __init__(self, layer_path, model): super(SoloToggleLayer, 
self).__init__(model)", "self.color = color self.old_color = '' self.model = model self.stage", "self.prev_selection # undo_debug(self, start) @processing def redo(self): start = time.time()", "given layer with context to this command. Since a single", "dirties = [self.node_path] # delete any created nodes for node_path", "old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path)", "re-comp self.created_node_paths = [] self.node_path = node_path def undo(self): layer", "undo self.prev_selection = self.model.selection # get previous node data for", "= list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes", "import colors from nxt_editor import user_dir from nxt import nxt_path", "We should make another base command class that can be", "= self.stage.get_top_node(node, self.model.target_layer) if top_node is None: top_node = node", "self.model = model self.stage = model.stage self.layer_path = layer_path @processing", "val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a break point\"\"\" def", "= paths new_selection = model.selection[:] for path in paths: try:", "self).redo() self.setText(\"Localize instance path to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self,", "def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent =", "= model self.layer_path = layer_path self.prev_breaks = [] @processing def", "self.node_path = self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode,", "layer_saved: eff_by_undo = True # Set redo to False since", "Qt.QtWidgets import QUndoCommand # Internal from nxt_editor import colors from", "node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) 
self.created_node_paths +=", "if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path)", "self.model.lookup_layer(self.layer_path) # Re-create the node as an empty node new_nodes,", "= layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in", "old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE),", "if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else:", "= [self.node_path] if node is None: parent_path = nxt_path.get_parent_path(self.node_path) name", "dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path)", "graph\"\"\" def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): # TODO:", "layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict']", "parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path =", "redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np", "self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name", "class SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model): \"\"\"Sets the color", "= curr_selection + paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection,", "= model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path =", "for all child 
nodes for undo self.prev_node_data = {} @processing", "self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not created_node: self.return_value =", "nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path,", "if n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection =", "attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display'] =", "None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection", "comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n is not None:", "layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path)", "@processing def undo(self): for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path)", "prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) #", "def undo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000)))", "self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if", "\"\"\"Revert compute\"\"\" def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE,", "from nxt_editor import colors from nxt_editor import user_dir from nxt", "self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path)", "String of layer realpath :param other_removed_nodes: 
list of node paths", "self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in", "list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this", "if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute value this", "dirty def undo_debug(cmd, start): update_time = str(int(round((time.time() - start) *", "node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue,", "in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str", "False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo}", "nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to {}\".format(old_name,", "def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model", "> 0: rm_layer_data = True else: rm_layer_data = False for", "self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from {}\".format(self.attr_name,", "node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path)", "all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] =", "path :param model: StageModel :param layer_path: String of layer realpath", "overrides. 
:param color: string of new layer alias (name) :param", "if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme:", "self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\"", "processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand):", "= prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer", "self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer", "self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute value this also", "# get layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] =", "try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action", "GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection", "@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value", "{}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\"", "data to be set if undo is called layer =", "self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection", "INTERNAL number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants", "= comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines,", "comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def __init__(self,", "layer real path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\"", "@processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting", "be a # setattr. The way it works now we", "{self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value =", "{}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints for a given", "attr_display # get layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start']", "+= comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others)", "= self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers if layer_saved:", "not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER)", "model.stage self.layer_path = layer_path self.prev_values = {} @processing def undo(self):", "\"\"\"Delete attribute on a node\"\"\" def __init__(self, node_path, attr_name, model,", "created nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if", "self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = [] self.new_node_paths =", "= model.stage self.layer_path = layer_path @processing 
def undo(self): layer =", "new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection,", "model.comp_layer display_node = comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path,", "= model.stage self.insert_idx = idx self.file_path = file_path self.file_name =", "path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo =", "new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos", "placed in a list of descendants for each top node", "{} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = []", "a single command can effect layers in different ways. :param", "self.stage = model.stage self.layer_path = layer_path @processing def undo(self): layer", "except KeyError: # Removed by a save action pass def", "def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model", "# delete node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data)", "alias (name) :param layer_path: real path of layer :param model:", "new_nodes += [n] return new_nodes, new_node_paths, dirty def undo_debug(cmd, start):", "[nn_p] new_nodes += [n] return new_nodes, new_node_paths, dirty def undo_debug(cmd,", "self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] = parent_path parent_node = layer.lookup(parent_path)", "self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def", "new_child_order) new_node_paths += [nn_p] new_nodes += [n] return 
new_nodes, new_node_paths,", "= paths curr_selection = model.selection new_paths = curr_selection + paths", "if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = ()", "= [] for layer in self.stage._sub_layers: color = layer.color if", "of node paths that will be deleted in this event", "SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer", "layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self):", "\"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path,", "= self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths,", "new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new", "node_path in self.node_paths: node = layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer)", "source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for", ":param layer_path: string of layer real path :return: (bool, bool)", "attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def", "continue data = all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order'] =", "in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: 
self.data.get(META_ATTRS.VALUE)} else:", "= False self.created_node_paths = [] # get the node node", "self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str)) else:", "set of effected (unsaved) layers. If the layer is not", "if not self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint", "self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self,", "toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state =", "str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() + \" | \"", "= layer_path self.stage = model.stage # get undo data self.prev_selection", "the attribute if it does not exist if not self.stage.node_attr_exists(node,", "\"\"\"Sets the color for a given layer, if the layer", "duplicated nodes for node_path in self.new_node_paths: n = target_layer.lookup(node_path) if", "force=True) # restore position if self.parent_node_path != nxt_path.WORLD: prev_pos =", "is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name': name,", "+ paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add", "new_pos, layer) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if", "super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node", "model.selection[:] for path in paths: try: new_selection.remove(path) except ValueError: continue", "when undo is called after a layer is saved. 
:param", "attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection @processing", "layer=layer) if not delta_str: pos = new_pos prev_pos = self.old_positions[node_path]", "layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed", "self).__init__(model) self.model = model self.stage = model.stage self.insert_idx = idx", "self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path): \"\"\"Gets the effected", "[] self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path)", "self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self, node_paths,", "= {} self.others = other_removed_nodes @processing def undo(self): layer =", "exec input to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as", "new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode,", "create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value", "= nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path,", "DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this graph\"\"\" def __init__(self, node_paths, descendants,", "layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node =", "self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) 
self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to", "if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not created_node:", "else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order", "data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path,", "for k in colors.LAYER_COLORS] open_layer_colors = [] for layer in", "path to the list and emit model signal new_node_path =", "loop. \"\"\" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage = model.stage", "# named what it was named when it was deleted", "self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in INTERNAL_ATTRS.ALL:", "attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection =", "display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue # add node", "process new nodes for new_node in new: # add new", "set multiple attr's data. That way duplicate can just be", "self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Add breakpoint to", "layer real path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path)", "self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE)", "command macro. 
The list will be mutated by the stage", "self).__init__(model) self.parent_node_path = parent_node_path self.parent_node = None self.model = model", "= self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if attr not", "= node_data nodes += [node] # get current node hierarchy", "source_layer.lookup(node_path) # duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants)", "idx = 0 for new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx]", "target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self):", "self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path),", "= [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def", "self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path]", "\" | \" + update_time + \"ms\") def redo_debug(cmd, start):", "nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\":", "= self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node:", "__init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path", "super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\"", "self.model.selection[:] fix_selection.remove(self.node_path) 
self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path))", "in self.node_paths: node = source_layer.lookup(node_path) # duplicate node new, dirty", "def redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change child order on {}\".format(self.node_path)) class", "redo_effected_layer(self, layer_path): \"\"\"Adds layer to the model's set of effected", "self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: Targeted parenting", "self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand):", "for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] =", "self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints", "touch the top layer in redo... 
self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display')", "layer_path: string of layer real path :return: None \"\"\" layer_unsaved", "= {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS:", "def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state", "in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes)))", "new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set", "self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed)", "logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper", "layer to the model's set of effected (unsaved) layers. 
If", "def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path", "= self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE *", "self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths) == 1: path_str", "# delete any created nodes for node_path in self.created_node_paths: node", "class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a break point\"\"\" def __init__(self,", "layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name)", "None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order", "path to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path):", "# We don't want to fix names because we know", "def undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection = self.new_paths", "node_data['data'] = get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node,", "self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state =", "self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos # This might be", "{}\".format(self.node_path)) return self.setText(\"Set {} exec input to {}\".format(self.node_path, val)) class", "= self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx", "= None self.created_node_paths = 
[] @processing def undo(self): layer =", "layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val", "attribute comment\"\"\" def __init__(self, node_path, attr_name, comment, model, layer_path): data", "self.new_node_paths = [] self.created_node_paths = [] nodes = [] layer", "= self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return if not", "this redo has a # net zero effect on the", "layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment,", "self.file_name = file_name self.chdir = chdir @processing def undo(self): new_layer", "undo is called layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data", "self.setText(\"Set {} color to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer):", "None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection", "class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\" def __init__(self, node_path, exec_source,", "node_path in self.created_node_paths: node = layer.lookup(node_path) if node is not", "on \" \"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set", "= value self.model = model self.layer_path = layer_path @processing def", "layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute", "self.model._set_attr_display_state(new_node_path, attr_state) # set position for un-parent if self.parent_node_path ==", "layer_path self.color = color self.old_color = '' self.model = model", "is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: 
self.old_alias = layer.get_alias(fallback_to_local=False)", "self.prev_node_data = {} self.created_node_paths = [] @processing def undo(self): for", "{}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\" def __init__(self, node_path, attr_name,", "= attr_display # get layer data is_start = self.model.get_is_node_start(self.node_path, layer)", "self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node)", "is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection", "layer is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else:", "for each top node so when # they are un-parented", "layer is properly marked as unsaved even if we undo", "\"ms\") def redo_debug(cmd, start): update_time = str(int(round((time.time() - start) *", "DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes): \"\"\"Delete node from", "self.layer_path = layer_path self.created_node_paths = [] self.remove_attr = False self.prev_data", "None self.model = model self.stage = model.stage self.node_paths = node_paths", "\"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)", "layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties +=", "model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo()", "it as effected. 
This case happens when undo is called", "\"\"\"Move nodes\"\"\" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model", "self.new_positions = node_positions self.old_positions = {} for path in self.new_positions.keys():", "node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths) == 1:", "new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines each build", "self.added_paths = paths curr_selection = model.selection new_paths = curr_selection +", "redo(self): self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def", "for node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value", "start) * 1000))) logger.debug(cmd.text() + \" | \" + update_time", "existing layer\"\"\" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path =", "SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def __init__(self, node_path, instance_path, model, layer_path):", "self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func", "[self.node_path] # delete any created nodes for node_path in self.created_node_paths:", "eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds layer to the model's set", "class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths = paths curr_selection", "__init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path)", "= model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, 
self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def", "new_attr_name self.model = model self.stage = model.stage self.layer_path = layer_path", "self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child", "class attr `_first_effected_by_redo` to False. This makes sure the layer", "layer = self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else: func", "layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save", "model, None) self.text = \"Removed reference to {}\".format(layer_path) @processing def", "start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model, layer_path):", "from nxt_editor import user_dir from nxt import nxt_path from nxt.nxt_layer", "= closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if", "not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path)", "node_path, attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment,", "layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color,", "local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path)", "for node_path in self.node_paths: node = source_layer.lookup(node_path) # duplicate node", "nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p,", 
"INTERNAL_ATTRS.CHILD_ORDER) else: ancestors = layer.ancestors(node_path) if ancestors: ancestor = ancestors[0]", "> 0: rm_layer_data = True else: rm_layer_data = False comp_layer", "class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a node\"\"\" def __init__(self, node_path,", "self.node_path = node_path self.nice_attr_name = attr_name self.attr_name = attr_name self.data", "= [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None,", "common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for", "layer_path) def redo(self): self.setText(\"Revert exec input on {}\".format(self.node_path)) class RevertNode(DeleteNode):", "real path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo", "by setting the class attr `_first_effected_by_redo` to True. :param layer_path:", "nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in", "layer is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color = self.color else:", "node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is not", "undo is called after a layer is saved. :param layer_path:", "nxt import nxt_io from nxt import GRID_SIZE import nxt_editor logger", "it works now we can only set one attr's data", "data. 
That way duplicate can just be a # setattr.", "node = self.model.target_layer.lookup(node_path) if not node: continue data = all_data['data']", "= self.model.lookup_layer(self.layer_path) # Re-create the node as an empty node", "node_path in self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path) if", "node_data['parent'] = parent_path parent_node = layer.lookup(parent_path) ancestor_path = parent_path child_order", "getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data =", "layer): stage = model.stage comp_layer = model.comp_layer new_node_paths = []", "ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor:", "in layer_color_index: if c not in open_layer_colors: layer_color = c", "dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty", "changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val =", "self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\"", "layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data", "= node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if", "dirty_nodes += dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if", "for node_path, all_data in self.prev_node_data.items(): apply_data = {} node =", "paths curr_selection = model.selection new_paths = curr_selection + paths 
super(AddSelection,", "self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer", "`_first_effected_by_redo` to False. This makes sure the layer is properly", "= self.model.lookup_layer(self.layer_path) # Remove our created empty nodes for node_path", "in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths = [] self.toggle_state() for", "False. This makes sure the layer is properly marked as", "dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']:", "RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None,", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color = self.old_color else:", "(closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if", "= self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing", "self.model.remove_attr_display_state(old_node_path) if attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state) # set", "self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths = [] @processing", "self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created nodes for node_path", "self).__init__(model) self.name = name self.data = data self.parent_path = parent_path", "self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node = source_layer.lookup(node_path) # duplicate", "Overload remove attr here to insure attr is deleted self.remove_attr", 
"{}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes): \"\"\"Delete", "self.model, layer) target_node = new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path)", "set position for un-parent if self.parent_node_path == nxt_path.WORLD: old_root =", "to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def __init__(self, layer_path,", "layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self):", "__init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path =", "by a save action pass def undo_effected_layer(self, layer_path): \"\"\"Removes layer", "self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0", "a save action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo}", "model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self):", "eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save", "undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer)", "self.prev_node_data = {} self.created_node_paths = [] layer = self.model.target_layer for", "{}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): exec_path =", "Fixme: Does not account for rebuilding proxy nodes for the", "name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, 
INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer)", "# set multiple attr's data. That way duplicate can just", "n in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is not", "__init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model =", "import QUndoCommand # Internal from nxt_editor import colors from nxt_editor", "is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color = self.color else: self.old_color", "data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'):", "self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is", "= str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add", "undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set", "RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None,", "+= new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand):", "model, layer_path): node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer)", "[self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True)", "eff_by_redo} class AddNode(NxtCommand): \"\"\"Add a node to the graph\"\"\" def", "Re-create the node as an empty node new_nodes, new_paths, dirty", "val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to 
{}\".format(attr_path, val)) # redo_debug(self,", "self.node_path = None self.created_node_paths = [] @processing def undo(self): layer", "{} attr from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def", "self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path", "path and the comp layer. It is important to note", "position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path", "layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {}", "= {} self.node_path = node_path self.node_data = {} self.others =", "delete command not to re-comp self.created_node_paths = [] self.node_path =", "self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to a node.\"\"\" def", "[] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated", "= model.stage self.insert_idx = idx self.file_path = file_path self.real_path =", "self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is", "Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not", "self.node_paths: node = source_layer.lookup(node_path) # duplicate node new, dirty =", "old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple", "self.layer_path = layer_path self.prev_values = {} @processing def undo(self): layer", "self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute", 
"self.pos = pos or [0.0, 0.0] self.prev_selection = self.model.selection #", "layer=source_layer) # delete any created nodes for node_path in self.created_node_paths:", ":param layer_just_saved: string of layer real path :return: None \"\"\"", "{}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def __init__(self, node_path, comment,", "self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the", "if val is None: self.setText(\"Removed exec input for {}\".format(self.node_path)) return", "comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment,", "they are un-parented each node can be placed visually beside", "= \"Removed reference to {}\".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo()", "be shared by other DeleteNode commands in a command macro.", "self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\" def", "if layer is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color = self.color", "= self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths:", "pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path))", "layer = self.model.lookup_layer(self.layer_path) # Re-create the node as an empty", "def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, 
exec_source,", "self.created_node_paths = [] self.remove_attr = False self.prev_data = {} self.recomp", "self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def", "pos or [0.0, 0.0] self.prev_selection = self.model.selection # resulting node", "dirty_nodes += dirty + [self.node_path] if self.node_path in self.model.selection: fix_selection", "def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks", "self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for", "if not node: continue data = all_data['data'] child_order = all_data['data'].get('child_order',", "for layer in self.stage._sub_layers: color = layer.color if color: color", "top layer store an overrides. :param color: string of new", "source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()),", "(INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from nxt", "new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if", "self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths)", "self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add", "in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in self.node_paths:", "alias self.old_alias = '' self.model = model self.stage = 
model.stage", "in a list of descendants for each top node so", "state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def", "def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths =", "parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path,", "self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False self.created_node_paths = []", "self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data pos", "self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path)", "properly marked as unsaved even if we undo an action", "pass def undo_effected_layer(self, layer_path): \"\"\"Removes layer from the model's set", "self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name", "target_layer) if not has_parent and new_node_path != node_path: pos =", "__init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model,", "is not marked as effected in the model we mark", "the list and emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths", "self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if attr not in", "as it deletes node, this behavior is depended upon! 
:param", "on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model,", "if node is not None: self.stage.delete_node(node, layer) idx = 0", "self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp:", "self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self):", "self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is self: return if", "new_pos prev_pos = self.old_positions[node_path] # Only letting it set text", "= is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path,", "nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict,", "self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if", "self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer =", "def redo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000)))", "model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self):", "def undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self):", "= node_paths self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path =", 
"layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2 lines each", "self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path)", "+= [nn_p] new_nodes += [n] return new_nodes, new_node_paths, dirty def", "def __init__(self, model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me", "layer) self.new_node_paths = list(self.node_path_data.values()) idx = 0 for new_node_path in", "nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14,", "the effected state for a given layer with context to", "[]) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data:", "layer_just_saved): \"\"\"When the model marks a layer as saved we", "nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes +=", "layer_path self.created_node_paths = [] self.remove_attr = False self.prev_data = {}", "[] dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path,", "self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "been saved & the undo effects it eff_by_redo = False", "self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2 lines each build", "value} super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(AddAttribute,", "None self.prev_selection = model.selection @processing def undo(self): start = 
time.time()", "is self: return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: #", "__init__(self, paths, model): self.added_paths = paths curr_selection = model.selection new_paths", "layer_path, other_removed_nodes): \"\"\"Delete node from the layer at the layer", "layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection #", "restore layer data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] =", "self.model.selection = self.prev_selection # Fixme: Does not account for rebuilding", "self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n is", "\"\"\"Adds layer to the model's set of effected (unsaved) layers.", "= False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo':", "colors from nxt_editor import user_dir from nxt import nxt_path from", "model: StageModel :param layer_path: String of layer realpath :param other_removed_nodes:", "redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection = [self.node_path]", "node_data = {} display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue", "= self.node_data['parent'] # We don't want to fix names because", "if closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path", "layer_path self.prev_values = {} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand):", "{'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} 
closest_ancestor =", "layer_just_saved: string of layer real path :return: None \"\"\" eff_by_undo,", "common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple =", "__init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path =", "\"\"\"Set attribute value\"\"\" def __init__(self, node_path, attr_name, data, model, layer_path):", "if it does not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr", "layer\"\"\" def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model", "{\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data)", "self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data =", "[] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path", "= nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for", "rebuilding proxy nodes for the dirty nodes dirty_set = tuple(set(dirty))", "attr_display = self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display)", "in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if", "attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path,", "= layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] 
else: closest_ancestor =", "= {} self.new_node_paths = [] self.created_node_paths = [] nodes =", "breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints for", "def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path)", "idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2", "dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer", "nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer)", "self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple:", "True eff_by_undo = False else: # Now the undo of", "super(RevertInstancePath, self).redo() self.setText(\"Revert instance path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def", "super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class", "new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name,", "text once, relying on consistent delta. x_delta = pos[0] -", "# layer was saved again. 
eff_by_redo = True eff_by_undo =", "x_delta = pos[0] - prev_pos[0] y_delta = pos[1] - prev_pos[1]", "super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo()", "def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data =", "0: rm_layer_data = True else: rm_layer_data = False for p", "layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer)", "new layer alias (name) :param layer_path: real path of layer", "= model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path", "model self.stage = model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "n = layer.lookup(self.node_path) if n is not None: if self.remove_attr:", "[self.node_path] if self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection", "super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self): super(RevertInstancePath, self).redo()", "not None: self.node_data['attr_display'] = attr_display # get layer data is_start", "{self.attr_name: self.data}} self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data,", "not in open_layer_colors: layer_color = c break real_path = nxt_path.full_file_expand(self.file_path,", "value self.model = model self.layer_path = layer_path @processing def undo(self):", "new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class", "add new node path to the list and emit model", "model: StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color =", "layer_path in 
self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo", "target layer target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths,", "= self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1:", "if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name", "un-parent if self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos =", "redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set", "self.created_node_paths: node = layer.lookup(node_path) if node is not None: self.stage.delete_node(node,", "be placed visually beside it's # original top node. node_hierarchy_data", "layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path =", "{}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse", "= chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer", "self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\"", "file_path self.file_name = file_name self.chdir = chdir @processing def undo(self):", "= self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint else: func", "self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color = self.color else: self.old_color =", "# Tells the delete command not to re-comp self.created_node_paths =", "If the layer is not marked 
as effected in the", "if n is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties", "= None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer, \"filepath\":", "!= (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer =", "a node.\"\"\" def __init__(self, node_path, attr_name, value, model, layer_path): data", "self.old_positions[node_path] # Only letting it set text once, relying on", "= node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name =", "new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path) # preserve original data", "the node node = layer.lookup(self.node_path) dirties = [self.node_path] if node", "self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode,", "in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing", "attr to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a", "self).redo() self.setText(\"Change child order on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer", "layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties +=", "self.prev_paths = self.model.selection def undo(self): self.model.selection = self.prev_paths def redo(self):", "if layer is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path)", "def undo(self): for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if", "\"\"\"Parent 
Nodes\"\"\" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path", "True else: rm_layer_data = False for p in self.others[:]: self.others", "and Connections\"\"\" def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths =", "data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path)", "prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values", "node as the execution start point\"\"\" def __init__(self, node_path, value,", "self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path]", "moved to an index before this command and the same", "emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] #", "try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self):", "self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert", "layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {}", "def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo()", "path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at =", "if self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection =", "comp_layer=comp, fix_names=False) # Fixme: Targeted parenting would avoid the need", 
"layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] =", "comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if self.node_path", "in self.created_node_paths: n = layer.lookup(node_path) if n is not None:", "nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data =", "class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN,", "value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def", "layers) for attr in local_attrs: if attr not in attrs_to_keep:", "def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance path on {}\".format(self.node_path)) class", "self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self,", "Tells the delete command not to re-comp self.created_node_paths = []", "in common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path]", "model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias = alias self.old_alias", "None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result", "(first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo = False first_eff_by_redo = False try:", "= True created_node = True self.created_node_paths += [self.node_path] node =", "been marked as undo effects the # layer, meaning the", "by a save action pass 
self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo':", "model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name", "new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path)", "self).__init__() self.model = model self.layer_path = layer_path self.prev_breaks = []", "new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path", "1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index = [str(k.name()) for", ":return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path", "self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self,", "ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER]", "self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer", "str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self, node_path,", "command and the same # layer was saved again. 
eff_by_redo", "prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name !=", "string of layer real path :return: None \"\"\" layer_unsaved =", "self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\" def __init__(self,", "attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path,", "SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\" def __init__(self, node_path, attr_name, data, model,", "model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH,", "for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing", "remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path]", "as a break point\"\"\" def __init__(self, node_paths, value, model, layer_path):", "str(int(round((time.time() - start) * 1000))) logger.debug(\"Undo \" + cmd.text() +", "False for p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty", "= _add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1] self.created_node_paths += new_paths", "is important to note that the other_removed_nodes list must be", "layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True)", "set text once, relying on consistent delta. 
x_delta = pos[0]", "start) @processing def redo(self): start = time.time() created_node = False", "super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path =", "{}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance']", "thus this redo has a # net zero effect on", "ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does", "display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not None:", "= {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def", "INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True", "layer) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths)", "model, layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data,", "parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer)", "super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\"", "super(RevertCompute, self).redo() self.setText(\"Revert compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\"", "is not a top layer the top layer store an", "self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node = layer.lookup(node_path) name =", "[] # get node selection for undo self.prev_selection = 
self.model.selection", "INTERNAL_ATTRS.NAME) parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node,", "layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path)", "layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value = value self.model", "is not None: self.stage.delete_node(node, layer) idx = 0 for old_node_path", "self: return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This", "redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self,", "redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def", "information for each node. 
each node # path is placed", "class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model)", "= layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer, remove_layer_data=False)", "color for a given layer, if the layer is not", "self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path]", "self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing", "if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class", "node = layer.lookup(node_path) if node is not None: _, dirty", "node_paths self.value = value self.model = model self.stage = model.stage", "SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\" def __init__(self, node_path, code_lines, model,", "self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']:", "model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def", "node_data = self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name", "= self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if", "set of effected (unsaved) layers. 
If this command was the", "= stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table:", "= target_layer_path self.stage = model.stage # get undo data self.prev_selection", "self.data}} self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path,", "(self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path,", "ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def __init__(self, file_path, idx, model, chdir):", "alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias =", "{}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\" def __init__(self, node_path,", "get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path,", "nodes self.new_node_paths = [] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path)", "\"\"\"Set the node collapse state\"\"\" def __init__(self, node_paths, value, model,", "None, model, layer_path) # Get the data to be set", "parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path)", "self).redo() self.setText(\"Changed comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\"", "class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def __init__(self, 
file_path, file_name, idx,", "layer is self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path)", "= pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if", "# Re-create the node as an empty node new_nodes, new_paths,", "self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(),", "not node: continue data = all_data['data'] child_order = all_data['data'].get('child_order', [])", "(\"Set inst path on \" \"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt)", "layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class", "self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class", "= old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order =", "model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path)", "effect the layer we mark it as such by setting", "attribute to a node.\"\"\" def __init__(self, node_path, attr_name, value, model,", "model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path))", "other_removed_nodes list must be shared by other DeleteNode commands in", "redo has a # net zero effect on the layer", "a node to the graph\"\"\" def __init__(self, name, data, parent_path,", "redo(self): super(RemoveFromSelection, 
self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize", "layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node,", "self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if", "from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints for a", "def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()):", "rm_layer_data = False comp_layer = self.model.comp_layer if node is not", "tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # undo_debug(self, start)", "all the breakpoints for a given layer\"\"\" def __init__(self, model,", "display_node = comp_layer.lookup(nn_p) if display_node is not None: display_child_order =", "self.model._add_start_node(self.node_path, layer) # restore layer data pos = self.node_data.get('pos') if", "to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\" def __init__(self,", "Since a single command can effect layers in different ways.", "was called, thus this redo has a # net zero", "it doesn't exist on the target layer target_node = self.model.target_layer.lookup(node_path)", "layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False", "self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str =", "self.model.selection = self.prev_selection if len(self.node_paths) == 1: path_str = self.node_paths[0]", "= self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is self: 
return", "self.created_node_paths: n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n,", "the comp layer. It is important to note that the", "for undo self.prev_node_data = {} @processing def undo(self): layer =", "the layer is properly marked as unsaved even if we", "Layer was saved and then undo was called, thus this", "attr's data. That way duplicate can just be a #", "model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path): \"\"\"Gets the", "dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes", "\"\"\"Instance nodes on this graph\"\"\" def __init__(self, node_path, model, source_layer_path,", "{}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a break point\"\"\"", "+= [color] layer_color = layer_color_index[0] for c in layer_color_index: if", "from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS,", "self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path,", "by the stage as it deletes node, this behavior is", "= layer_path not in self.model.effected_layers if layer_saved: eff_by_undo = True", "node_positions self.old_positions = {} for path in self.new_positions.keys(): self.old_positions[path] =", "if c not in open_layer_colors: layer_color = c break real_path", "signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) #", "super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from", "= self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer)", 
"self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE", "nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name)", "undo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(\"Undo", "eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return if", "even if we undo an action after saving it. :param", "\"\"\"Rename node\"\"\" def __init__(self, node_path, name, model, layer_path): self.old_node_path =", "in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in", "model.stage self.insert_idx = idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path,", "self.return_value = None self.prev_selection = model.selection @processing def undo(self): start", "self.model.target_layer) if top_node is None: top_node = node top_node_path =", "n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection", "{}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self, node_positions, model, layer_path):", "self.stage._sub_layers: color = layer.color if color: color = color.lower() open_layer_colors", "given layer, if the layer is not a top layer", "self.layer_path = layer_path self.stage = model.stage # command data self.pos", "paths new_selection = model.selection[:] for path in paths: try: new_selection.remove(path)", "SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model)", "'' self.model = model self.stage = model.stage 
@processing def undo(self):", "selection for undo self.prev_selection = self.model.selection # get previous node", "all_data in self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path) if", "in different ways. :param layer_path: string of layer real path", "any created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path)", "= [] self.created_node_paths = [] nodes = [] layer =", "self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path) #", "self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer,", "target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path in", "new_node_paths += [nn_p] new_nodes += [n] return new_nodes, new_node_paths, dirty", "[] # delete any created nodes for node_path in self.created_node_paths:", "if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer)", "= {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class", "def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete", "when # they are un-parented each node can be placed", "= [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer)", "= [] self.created_node_paths = [] # get node selection for", "dirties = [self.node_path] if node is None: parent_path = nxt_path.get_parent_path(self.node_path)", "names because we know this node should be # named", "duplicate needs to get local + INTERNAL number of attrs.", "layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) 
if source_layer.layer_idx() >", "\"\"\"Delete node from the layer at the layer path and", "node should be # named what it was named when", "= nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else:", "layer was saved again. eff_by_redo = True eff_by_undo = False", "= name self.data = data self.parent_path = parent_path self.layer_path =", "node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME)", "self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path", "others) self.rebuild = False # Tells the delete command not", "func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand,", "{} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\"", "= child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer)", "on a node\"\"\" def __init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute,", "or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not", "node = layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] =", "layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class", "We don't touch the top layer in redo... 
self.undo_effected_layer(self.stage.top_layer.real_path) attr_display", "attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties", "TODO: We should make another base command class that can", "if n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers =", "node so when # they are un-parented each node can", "SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt", "original top node. node_hierarchy_data = {} if self.parent_node_path is nxt_path.WORLD:", "{} for np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for", "layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next 2", "node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] = parent_path parent_node", "self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path)", "return wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model", "self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def", "layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path))", "= layer_path self.new_positions = node_positions self.old_positions = {} for path", "= self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name': name, 'pos':", "layer) 
self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value:", "it was named when it was deleted new_nodes, dirty =", "self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate", "self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the node as an", "node_data['name'] = name node_data['parent'] = parent_path parent_node = layer.lookup(parent_path) ancestor_path", "attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def", "self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint", "stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table: display_node", "that can be used to # set multiple attr's data.", "string of layer real path :return: None \"\"\" eff_by_undo, eff_by_redo", "exec input for {}\".format(self.node_path)) return self.setText(\"Set {} exec input to", "not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath,", "self.node_data['parent'] # We don't want to fix names because we", "and the undo queue # was moved to an index", "for un-parent if self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos", "= self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths)))", "layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model,", "INTERNAL_ATTRS.NAME) is_break 
= self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent': parent, 'name':", "class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes,", "{}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse state\"\"\" def __init__(self,", "self.new_node_paths: n = target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n,", "= layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp,", "{} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model): \"\"\"Sets", "This case happens when undo is called after a layer", "apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys()", "self.prev_selection if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str", "model.stage comp_layer = model.comp_layer new_node_paths = [] new_nodes = []", "a break point\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint,", "def redo(self): self.prev_node_data = {} self.created_node_paths = [] layer =", "self.created_node_paths = [] # get node selection for undo self.prev_selection", "node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path)", "self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = []", "node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if", "ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr", "redo(self): self.prev_node_data = {} self.created_node_paths = [] 
layer = self.model.target_layer", "new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to {}\".format(old_name, new_name)) class", "point\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths", "undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else:", "\"\"\"Add new layer\"\"\" def __init__(self, file_path, file_name, idx, model, chdir):", "# Removed by a save action pass self._layers_effected_by_me[layer_path] = {'undo':", "= target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True)", "self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths", "= {} node = self.model.target_layer.lookup(node_path) if not node: continue data", "= self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path] = self.value", "layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer =", "[pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection", "value, model, layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name,", "def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path,", "not target_node: new_nodes, new_paths, dirty = _add_node_hierarchy(node_path, self.model, layer) target_node", "def redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths:", "layer_path, others) self.rebuild = False # Tells the delete command", 
"self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index = [str(k.name()) for k", "\"\"\"Localize nodes\"\"\" def __init__(self, node_path, attr_name, model, layer_path): node =", "node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node,", "= (closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display = self.model.get_attr_display_state(self.node_path)", "commands in a command macro. The list will be mutated", "name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name)", "def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines,", "should be # named what it was named when it", "pos=new_pos, layer=layer) if not delta_str: pos = new_pos prev_pos =", "if not self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection", "{} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self, node_paths,", "named what it was named when it was deleted new_nodes,", "or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs", "def redo(self): # Overload remove attr here to insure attr", "== 1: nodes_str = node_path else: nodes_str = 'nodes' self.setText('Move", "context to this command. Since a single command can effect", "self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str = self.node_paths[0] else: nodes_str", "color: string of new layer alias (name) :param layer_path: real", "each node. 
each node # path is placed in a", "enabled state\"\"\" def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path,", "import nxt_io from nxt import GRID_SIZE import nxt_editor logger =", "if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1", "__init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node", "in self._layers_effected_by_me: if eff_by_undo: # This command has already been", "self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path]", "META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model, layer_path)", "not self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for", "= self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Add breakpoint", "shared by other DeleteNode commands in a command macro. 
The", "name, data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name =", "node_hierarchy_data = {} if self.parent_node_path is nxt_path.WORLD: for node_path in", "to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage", "= True else: rm_layer_data = False comp_layer = self.model.comp_layer if", "nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model,", "layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) ==", "self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def", "first_eff_by_undo = False first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo']", "= self.model.target_layer.lookup(node_path) if not node: continue data = all_data['data'] child_order", "attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True)", "exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not", "self.model.effected_layers if layer_saved: eff_by_undo = True # Set redo to", "= str(int(round((time.time() - start) * 1000))) logger.debug(\"Undo \" + cmd.text()", "value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value =", "self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name,", "= str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class", "layer in self.stage._sub_layers: color = 
layer.color if color: color =", "self.remove_attr = False self.prev_data = {} self.recomp = attr_name in", "self.model.selection # resulting node self.node_path = None self.created_node_paths = []", "if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties)", "INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties +=", "node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path =", "self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints =", "the layer is not marked as effected in the model", "def __init__(self, name, data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model)", "attr in local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr)", "self.model.lookup_layer(self.layer_path) # Remove our created empty nodes for node_path in", "happens when undo is called after a layer is saved.", "self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp:", "in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self):", "n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer,", "super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text = \"Removed reference to", "self.new_node_paths = list(self.node_path_data.values()) idx = 0 for new_node_path in self.new_node_paths:", "= model self.stage = model.stage @processing def undo(self): layer =", "new_node_paths = [] new_nodes = [] 
node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table,", "None, model, layer_path) def redo(self): self.setText(\"Revert exec input on {}\".format(self.node_path))", "curr_selection = model.selection new_paths = curr_selection + paths super(AddSelection, self).__init__(new_paths,", "undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def", "= layer.lookup(parent_path) ancestor_path = parent_path child_order = [] if parent_node:", "= model self.layer_path = layer_path self.new_positions = node_positions self.old_positions =", "= layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path]", "INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths +=", "self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by", "0.0] self.prev_selection = self.model.selection # resulting node self.node_path = None", "# Removed by a save action pass def undo_effected_layer(self, layer_path):", "__init__(self, node_path, attr_name, model, layer_path): node = model.comp_layer.lookup(node_path) data =", "super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment on {}\".format(attr_path))", "= self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True)", "comment\"\"\" def __init__(self, node_path, attr_name, comment, model, layer_path): 
data =", "self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path] # delete any", "dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in dirties:", "the data to be set if undo is called layer", "None: self.model._set_attr_display_state(new_node_path, attr_state) # set position for un-parent if self.parent_node_path", "[self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path,", "super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the node as", "list of node paths that will be deleted in this", "dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty", "Targeted parenting would avoid the need for a recomp if", "is saved. :param layer_path: string of layer real path :return:", "def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class", "attr's data at a # time and duplicate needs to", "child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] =", "layer alias (name) :param layer_path: real path of layer :param", "self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def", "class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to a node.\"\"\" def __init__(self,", "str(self.node_paths) if self.value: self.setText(\"Add breakpoint to {}\".format(path_str)) else: self.setText(\"Remove breakpoint", "= self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) 
self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP)", "know this node should be # named what it was", "= node_path else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str))", "layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode,", "top_node = self.stage.get_top_node(node, self.model.target_layer) if top_node is None: top_node =", "the same # layer was saved again. eff_by_redo = True", "self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if", "layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos,", "= layer_path self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path] =", "redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set", "layer not the redo eff_by_redo = False eff_by_undo = True", "redo to False since now its been saved & the", "# self.model.node_added.emit(node_path) # preserve original data node_data['data'] = get_node_as_dict(target_node) #", "self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is", "super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from", "unsaved even if we undo an action after saving it.", "else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _,", "new_name)) class SetAttributeComment(SetNodeAttributeData): 
\"\"\"Set attribute comment\"\"\" def __init__(self, node_path, attr_name,", "self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection =", "layer.color = self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path)", "self).redo() self.node_path = self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer):", "layer_saved = layer_path not in self.model.effected_layers if layer_saved: eff_by_undo =", "might be a bug? We don't touch the top layer", "= self.prev_selection @processing def redo(self): self.prev_node_data = {} self.created_node_paths =", "= str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to {}\".format(attr_path, val)) # redo_debug(self, start)", "in self.node_paths: node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path", "local + INTERNAL number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths =", "# get undo data self.prev_selection = self.model.selection self.prev_starts = []", "class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self, node_path, model, layer_path): super(RevertCompute,", "layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name = attr_name self.new_attr_name", "self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values =", "action pass def undo_effected_layer(self, layer_path): \"\"\"Removes layer from the model's", "= self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue):", "node_paths self.value = value self.model = model self.layer_path = layer_path", "SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data,", "node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0:", "else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\"", "self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path]", "self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as", "layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if", "= model.comp_layer display_node = 
comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute,", "self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path)", "for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs +=", "META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from nxt import", "def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection =", "node.\"\"\" def __init__(self, node_path, attr_name, value, model, layer_path): data =", "super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo()", "reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def __init__(self,", "{}\".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self):", "= layer.get_color(local=True) layer.color = self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color)", "duplicate can just be a # setattr. 
The way it", "target_layer_path self.stage = model.stage # get undo data self.prev_selection =", "def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths = paths self.model", "model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode,", "__init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model,", "self.old_color = layer.get_color(local=True) layer.color = self.color else: self.old_color = layer.get_color(fallback_to_local=False)", "= not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {}", "= self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display'] = attr_display", "= {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _, dirties =", "apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path,", "{}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\" def", "delta_str: pos = new_pos prev_pos = self.old_positions[node_path] # Only letting", "self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color, layer_path,", "= [] @processing def undo(self): for node_path in self.created_node_paths: n", "None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: 
ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER)", "RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self, node_path, attr_name, new_attr_name, model, layer_path):", "other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if self.node_path in self.model.selection:", "node\"\"\" def __init__(self, node_path, name, model, layer_path): self.old_node_path = node_path", "def __init__(self, file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path", "open_layer_colors += [color] layer_color = layer_color_index[0] for c in layer_color_index:", "= True self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data =", "new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not", "attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name", "INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display", "self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in self.layer_paths:", "INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self): self.setText(\"Revert exec input on", "apply_data = {} node = self.model.target_layer.lookup(node_path) if not node: continue", "return new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time = str(int(round((time.time()", "self.stage = model.stage self.insert_idx = idx self.file_path = file_path self.file_name", "{}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this graph\"\"\" def __init__(self,", "attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to {}\".format(old_name, new_name))", "undo was called, thus this redo has a # net", 
"self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand):", "(attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set", "eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds layer to the", "None: # delete node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer,", "on the target layer target_node = self.model.target_layer.lookup(node_path) if not target_node:", "self.prev_breaks = {} self.node_path = node_path self.node_data = {} self.others", "self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo() layer", "it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(),", "self.setText(\"Revert compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self,", "input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path):", "redo(self): super(SetNodeInstance, self).redo() txt = (\"Set inst path on \"", "\"\"\"Remove existing layer\"\"\" def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx()", "self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage #", "def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue):", "\" + update_time + \"ms\") def redo_debug(cmd, start): update_time =", "self.new_attr_name) 
self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path)", "ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor,", "dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False,", "node can be placed visually beside it's # original top", "paths self.model = model self.prev_paths = self.model.selection def undo(self): self.model.selection", "make another base command class that can be used to", "else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class", "path_str = str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData):", "attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path, attr_state)", "layer\"\"\" def __init__(self, file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model)", "self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color,", "to effect the layer we mark it as such by", "= {\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name'] }", "graph\"\"\" def __init__(self, name, data, parent_path, pos, model, layer_path): super(AddNode,", "{} self.created_node_paths = [] layer = self.model.target_layer for node_path in", "self).redo() val = self.data.get(META_ATTRS.VALUE) if val is None: self.setText(\"Removed exec", "prev_pos, layer=source_layer) # delete any created nodes for node_path in", "def redo(self): self.model.selection = 
self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection):", "before this command and the same # layer was saved", "self.model.selection # resulting nodes self.new_node_paths = [] @processing def undo(self):", "not None: self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection =", "child order on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def", "= pos[0] - prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str", "saved. :param layer_path: string of layer real path :return: None", "name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data =", "to fix names because we know this node should be", "note that the other_removed_nodes list must be shared by other", "self.prev_data = {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value =", "changed_attrs = () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty,", "ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor,", "new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP)", "NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def __init__(self, file_path, file_name, idx, model,", "nodes\"\"\" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model =", "self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths", "does not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True", "layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos 
in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos,", "self.setText(\"Changed comment on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def", "== nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE *", "first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When the model marks a layer", "{'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): \"\"\"Add a node to", "node_path def undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove our created", "layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path)", "!= prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position if", "eff_by_undo = True # Set redo to False since now", "= layer.lookup(self.node_path) # get node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH)", "order on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def __init__(self,", "getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths", "self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path):", "update_time = str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() + \"", "in local_attrs: if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True)", "input to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a", "from Qt.QtWidgets import QUndoCommand # 
Internal from nxt_editor import colors", "string of new layer alias (name) :param layer_path: real path", "'redo': eff_by_redo} class AddNode(NxtCommand): \"\"\"Add a node to the graph\"\"\"", "node node = layer.lookup(self.node_path) dirties = [self.node_path] if node is", "the layer not the redo eff_by_redo = False eff_by_undo =", "new_selection = model.selection[:] for path in paths: try: new_selection.remove(path) except", "= self.model.selection self.prev_node_data = {} self.created_node_paths = [] @processing def", "if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model,", "ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove", "self.model.comp_layer if node is not None: # delete node _,", "if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node)", "False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp =", "on {}\".format(self.node_path)) class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def __init__(self, node_path,", "self.setText(\"Added reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def", "old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes += [n]", "= [] self.node_path = node_path def undo(self): layer = self.model.lookup_layer(self.layer_path)", "layer data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos", "{}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def __init__(self, alias, layer_path,", "new_node_table, dirty = 
stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n", "in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state", "model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if", "return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When the model marks", "get undo data self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks", "descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage", "= layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path)", "self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color =", "new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths))", "# setattr. 
The way it works now we can only", "nxt_editor import user_dir from nxt import nxt_path from nxt.nxt_layer import", "get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path]", "self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this graph\"\"\" def", "from the layer at the layer path and the comp", "= [] node = layer.lookup(self.node_path) # get node info parent", "when it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent,", "= self.prev_selection if len(self.node_paths) == 1: path_str = self.node_paths[0] else:", "= self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer", "target_layer_path): # TODO: We should make another base command class", "idx, model, None) self.text = \"Removed reference to {}\".format(layer_path) @processing", "self.layer_path = layer_path self.model = model self.layer_paths = [] def", "self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self,", "__init__(self, color, layer_path, model): \"\"\"Sets the color for a given", "comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if", "nodes\"\"\" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths", "of layer real path :return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo)", "the delete command not to re-comp self.created_node_paths = [] self.node_path", "are 
un-parented each node can be placed visually beside it's", "eff_by_undo and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo':", "at a # time and duplicate needs to get local", "other_removed_nodes: list of node paths that will be deleted in", "list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes +=", "start=self.chdir) layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR:", "= self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created nodes", "name node_data['parent'] = parent_path parent_node = layer.lookup(parent_path) ancestor_path = parent_path", "self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count =", "inst path on \" \"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class", "= [] self.prev_breaks = {} self.node_path = node_path self.node_data =", "self.node_paths = node_paths self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path", "an index before this command and the same # layer", "not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path,", "target_node: new_nodes, new_paths, dirty = _add_node_hierarchy(node_path, self.model, layer) target_node =", "is nxt_path.WORLD: for node_path in self.node_paths: node = layer.lookup(node_path) top_node", "is None: top_node = node top_node_path = layer.get_node_path(top_node) top_node_descendant_list =", "layer_path): super(SetNodeStartPoint, self).__init__(node_path, 
INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set", "child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor", "1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data =", "= '' self.model = model self.stage = model.stage @processing def", "+ 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if", "model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path", "dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(),", "= layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to {}\".format(layer.filepath,", "comp_layer = model.comp_layer new_node_paths = [] new_nodes = [] node_hierarchy", "user_dir from nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY", "self.new_paths self.setText('Set selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model):", "prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys()))", "stage = model.stage comp_layer = model.comp_layer new_node_paths = [] new_nodes", "= getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order,", "command. 
Since a single command can effect layers in different", "if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:])", "realpath :param other_removed_nodes: list of node paths that will be", "None: self.node_data['attr_display'] = attr_display # get layer data is_start =", "= layer_path self.color = color self.old_color = '' self.model =", "new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if", "apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer,", "self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state)", "is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state", "= getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order)", "= getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer) node_data['pos']", "self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count", "@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent", "target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set position 
has_parent", "super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self): self.setText(\"Revert exec", "__init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths", "on this graph\"\"\" def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path):", "set one attr's data at a # time and duplicate", "\"\"\"Toggles muting an existing layer\"\"\" def __init__(self, layer_path, model): super(MuteToggleLayer,", "ancestor_child_order[:]) # Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display", "layer=layer, force=True) # restore position if self.parent_node_path != nxt_path.WORLD: prev_pos", "extra_data = {\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name']", "= self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except", "= self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name ==", "= self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes +=", "layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self): super(RevertInstancePath,", "compute\"\"\" def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [],", "list will be mutated by the stage as it deletes", "parent_path parent_node = layer.lookup(parent_path) ancestor_path = parent_path child_order = []", "if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): 
\"\"\"Set", "continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {}", "else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "existing layer\"\"\" def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path =", "self.model = model self.prev_paths = self.model.selection def undo(self): self.model.selection =", "+= [node] # get current node hierarchy information for each", "self).redo() txt = (\"Set inst path on \" \"{} to", "nxt_editor import colors from nxt_editor import user_dir from nxt import", "copy import logging import time # External from Qt.QtWidgets import", "model self.stage = model.stage self.layer_path = layer_path self.prev_values = {}", "{} color to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage", "start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() +", "fix_selection = self.model.selection[:] fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete", "Fixme: Targeted parenting would avoid the need for a recomp", "def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model self.layer_path", "node as a break point\"\"\" def __init__(self, node_paths, value, model,", "node_path: String of node path :param model: StageModel :param layer_path:", "if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors = layer.ancestors(node_path)", "INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, 
self).redo() self.setText(\"Change child", "layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input on {}\".format(self.node_path))", "input on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def __init__(self, node_path,", "node comment\"\"\" def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path,", "and duplicate needs to get local + INTERNAL number of", "to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a break", "self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position if self.parent_node_path !=", "rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name,", "this node should be # named what it was named", "= {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor", "redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment on", "[] @processing def undo(self): for node_path in self.created_node_paths: n =", "\"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The next", "dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() >", "node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def", "redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def", "self.stage.duplicate_node(node=node, layer=target_layer, 
descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node", "meaning the layer has been saved and the undo queue", "'redo': eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds layer to the model's", "string of layer real path :return: (bool, bool) | (first_effected_by_undo,", "def __init__(self, paths, model): self.added_paths = paths curr_selection = model.selection", "not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else:", "to {}\".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self,", "True else: rm_layer_data = False comp_layer = self.model.comp_layer if node", "self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in self.node_paths: self.prev_values[np] =", "class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this graph\"\"\" def __init__(self, node_path,", "super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val is None: self.setText(\"Removed", "layer_path: real path of layer :param model: StageModel \"\"\" super(SetLayerColor,", "n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n =", "\"\"\"When the model marks a layer as saved we reset", "= self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks =", "for a recomp if layer.descendants(self.node_path): self.recomp = True created_node =", "redo(self): delta_str = None layer = self.model.lookup_layer(self.layer_path) for node_path, new_pos", "eff_by_undo: # This command has already been marked as undo", "= logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return", "True. 
:param layer_path: string of layer real path :return: None", "undo data self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks =", "{}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this graph\"\"\" def", "paths, model): self.rem_paths = paths new_selection = model.selection[:] for path", "= str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self,", "self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def", "next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added", "code value\"\"\" def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path,", "def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value,", "else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,))", "real path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at", "layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str", "src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer,", "layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if 
top_node is None: top_node", "attribute on a node\"\"\" def __init__(self, node_path, attr_name, model, layer_path):", "model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue):", "self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in self.node_paths: self.prev_values[np]", "super(ReferenceLayer, self).__init__(model) self.model = model self.stage = model.stage self.insert_idx =", "DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a node\"\"\" def __init__(self, node_path, attr_name,", "self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data", "for np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path", "{} node = self.model.target_layer.lookup(node_path) if not node: continue data =", "lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to", "- prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str = '{},", "+ [self.node_path] if self.node_path in self.model.selection: fix_selection = self.model.selection[:] fix_selection.remove(self.node_path)", "ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection", "if pos: self.model.top_layer.positions[self.node_path] = pos # This might be a", "is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self):", "as unsaved even if we undo an action after saving", "nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class 
InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on", "if self.value: self.setText(\"Add breakpoint to {}\".format(path_str)) else: self.setText(\"Remove breakpoint from", "self.setText(\"Changed comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\"", "model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value = value", "def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos =", "and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False,", "# External from Qt.QtWidgets import QUndoCommand # Internal from nxt_editor", "import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def", "was named when it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'],", "= get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']:", "undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp =", "it eff_by_redo = False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except", "layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model", "node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute,", "can effect layers in different ways. 
:param layer_path: string of", "else: rm_layer_data = False comp_layer = self.model.comp_layer if node is", "data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer)", "= self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx = 0", "node = layer.lookup(self.node_path) dirties = [self.node_path] if node is None:", "RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self, node_path, name, model, layer_path): self.old_node_path", "self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths,", "| \" + update_time + \"ms\") def redo_debug(cmd, start): update_time", "layer_path) def redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute on {}\".format(self.node_path)) class", "def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand):", "= layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data", "class SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\" def __init__(self, paths, model):", "for node_path in self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path)", "layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any created", "\"\"\"Rename attribute\"\"\" def __init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute,", "= node_paths self.value = value self.model = model self.layer_path =", "set attribute value this also adds the attribute if it", "effected. 
This case happens when undo is called after a", "delete duplicated nodes for node_path in self.new_node_paths: n = target_layer.lookup(node_path)", "new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path", "this command effects the layer not the redo eff_by_redo =", "restore position if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer", "new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name", "super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\"", "model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self):", "@processing def redo(self): self.prev_node_data = {} self.created_node_paths = [] layer", "eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers", "def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias", "data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs =", "def __init__(self, node_path, model, layer_path, other_removed_nodes): \"\"\"Delete node from the", "self.prev_starts = [] self.prev_breaks = {} self.node_path = node_path self.node_data", "top layer in redo... 
self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display", "way it works now we can only set one attr's", "is None: self.setText(\"Removed exec input for {}\".format(self.node_path)) return self.setText(\"Set {}", "self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks))", "if layer is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path)", "layer is not marked as effected in the model we", "delete any created nodes for node_path in self.created_node_paths: n =", "layer.lookup(self.node_path) if n is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name)", "self.setText(\"Localize exec input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path,", "model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is", "create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result", "it as such by setting the class attr `_first_effected_by_redo` to", "self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count =", "redo eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo':", "fix_selection.remove(self.node_path) self.model.selection = fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class", "and emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path]", 
"@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos in", "undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self): self.layer_paths", "exist on the target layer target_node = self.model.target_layer.lookup(node_path) if not", "redo_debug(cmd, start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text()", "self.node_paths: node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path =", "self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str = self.node_paths[0] else:", "old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER,", "self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {}", "(attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or", "= layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path)", "# delete duplicated nodes for node_path in self.new_node_paths: n =", "layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] = parent_path", "= [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the", "top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list", "self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1 
self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values())", "attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path,", "\"\"\"Set node as a break point\"\"\" def __init__(self, node_paths, value,", "= {} self.created_node_paths = [] @processing def undo(self): for node_path", "or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs)", "delete node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes", "self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path) if not node:", "this node as the execution start point\"\"\" def __init__(self, node_path,", "# get previous node data for all child nodes for", "child order\"\"\" def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path,", "self._get_effects(layer_path) if not eff_by_undo and layer_unsaved: return if not eff_by_undo:", "a # time and duplicate needs to get local +", "self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str =", "model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name", "+= comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in dirties: attr_path", "None layer = self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path,", "node. 
each node # path is placed in a list", "if color: color = color.lower() open_layer_colors += [color] layer_color =", ":param layer_path: string of layer real path :return: None \"\"\"", "layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path)", "# Only letting it set text once, relying on consistent", "node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def", "= self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer", "of effected (unsaved) layers. If this command was the first", "= layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if top_node is None:", "one attr's data at a # time and duplicate needs", "self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection =", "self.created_node_paths = [] @processing def undo(self): for node_path in self.created_node_paths:", "not in self.model.effected_layers if layer_saved: eff_by_undo = True # Set", "other DeleteNode commands in a command macro. 
The list will", "layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path] =", "= str(self.node_paths) if self.value: self.setText(\"Add breakpoint to {}\".format(path_str)) else: self.setText(\"Remove", "self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None layer = self.model.lookup_layer(self.layer_path)", "model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt = (\"Set inst", "SetNodeBreakPoint(QUndoCommand): \"\"\"Set node as a break point\"\"\" def __init__(self, node_paths,", "= node_path self.nice_attr_name = attr_name self.attr_name = attr_name self.data =", "super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths =", "= {} def _get_effects(self, layer_path): \"\"\"Gets the effected state for", "self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to", "= None layer = self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items():", "def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model", "self.layer_path = layer_path self.color = color self.old_color = '' self.model", "INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val =", "layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self): self.model.about_to_rename.emit()", "undo an action after saving it. 
:param layer_just_saved: string of", "parent_path self.layer_path = layer_path self.stage = model.stage # command data", "= [] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes =", "real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path,", "16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value =", "model self.stage = model.stage self.insert_idx = idx self.file_path = file_path", "<= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer", "self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\" def __init__(self,", "self.model = model self.layer_paths = [] def undo(self): self.toggle_state() for", "the # layer, meaning the layer has been saved and", "def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths =", "nxt_path.WORLD: for node_path in self.node_paths: node = layer.lookup(node_path) top_node =", "== 1: nodes_str = self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated", "# self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if", "rm_layer_data = False for p in self.others[:]: self.others += comp_layer.get_node_dirties(p)", "beside it's # original top node. 
node_hierarchy_data = {} if", "= node top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list", "= layer_path in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not", "in self.new_node_paths: n = target_layer.lookup(node_path) if n is not None:", "layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def", "duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) #", "= paths self.model = model self.prev_paths = self.model.selection def undo(self):", "in the model we mark it as effected. This case", "undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any", "model.stage self.layer_path = layer_path self.created_node_paths = [] self.remove_attr = False", "model.comp_layer) if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data,", "idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model =", "{META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self):", "SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path", "node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path)", "result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name", "self.model.target_layer for node_path in self.node_paths: node_data = {} display_node =", "= {} @processing def undo(self): 
layer = self.model.target_layer self.undo_effected_layer(layer.real_path) #", "not a top layer the top layer store an overrides.", "self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove", "to note that the other_removed_nodes list must be shared by", "new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name =", "def redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path)", "layer, meaning the layer has been saved and the undo", "path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def", "child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def", "The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path)", "= {} self.node_path_data = {} self.new_node_paths = [] self.created_node_paths =", "{} self.created_node_paths = [] @processing def undo(self): for node_path in", "= name node_data['parent'] = parent_path parent_node = layer.lookup(parent_path) ancestor_path =", "parent_node = layer.lookup(parent_path) ancestor_path = parent_path child_order = [] if", "{}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def __init__(self, file_path, idx,", "prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node", "super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo()", "model.selection new_paths = curr_selection + paths super(AddSelection, 
self).__init__(new_paths, model) def", "is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] else:", "if self.parent_node_path is nxt_path.WORLD: for node_path in self.node_paths: node =", "closest_ancestor = closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor)", "code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute on", "# duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0]))", "code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def", "val is None: self.setText(\"Removed exec input for {}\".format(self.node_path)) return self.setText(\"Set", "is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path)", "is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node)", "self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer)", "continue # add node if it doesn't exist on the", "import logging import time # External from Qt.QtWidgets import QUndoCommand", "def _get_effects(self, layer_path): \"\"\"Gets the effected state for a given", "parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode,", "can be placed visually beside it's # original top node.", "model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model = model", "to this command. 
Since a single command can effect layers", "stage as it deletes node, this behavior is depended upon!", "self.model.comp_layer parent = self.node_data['parent'] # We don't want to fix", "value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename", "self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty = _add_node_hierarchy(node_path, self.model,", "[str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors = [] for layer", "self.chdir = chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if", "this command was the first to effect the layer we", "= self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr", "\"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled", "layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer,", "instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt = (\"Set", "data self.stage = model.stage self.layer_path = layer_path self.created_node_paths = []", "if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value)) class", "self).redo() self.remove_attr = True self.setText(\"Add {} attr to {}\".format(self.attr_name, self.node_path))", "eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo,", "= [str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors = [] for", "k in colors.LAYER_COLORS] open_layer_colors = [] for layer in 
self.stage._sub_layers:", "name = getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name,", "get_node_as_dict, list_merger) from nxt import nxt_io from nxt import GRID_SIZE", "Now the undo of this command effects the layer not", "self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {} for old_path,", "node child order\"\"\" def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder,", "saved & the undo effects it eff_by_redo = False self.model.effected_layers.add(layer_path)", "self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue):", "NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected)", "super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add {} to", "= attr_name self.attr_name = attr_name self.data = data self.stage =", "this event loop. \"\"\" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage", "setattr. 
The way it works now we can only set", "data node_data['data'] = get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node, self.model.target_layer,", "self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode,", ":param model: StageModel :param layer_path: String of layer realpath :param", "node_path in self.created_node_paths: n = layer.lookup(node_path) if n is not", "has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path !=", "self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer", "True} self.model.effected_layers.add(layer_path) else: # Layer was saved and then undo", "current node hierarchy information for each node. each node #", "pos: self.model.top_layer.positions[self.node_path] = pos # This might be a bug?", "self).__init__(model) self.model = model self.layer_path = layer_path self.new_positions = node_positions", "for a given layer with context to this command. 
Since", "the graph\"\"\" def __init__(self, name, data, parent_path, pos, model, layer_path):", "SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model, layer_path): data =", "= file_path self.file_name = file_name self.chdir = chdir @processing def", "self.node_path = node_path def undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove", "self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer =", "= [] nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for", "node to the graph\"\"\" def __init__(self, name, data, parent_path, pos,", "= self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if", "model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE in data:", "self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:", "to the graph\"\"\" def __init__(self, name, data, parent_path, pos, model,", "attr_display is not None: self.node_data['attr_display'] = attr_display # get layer", "self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx = 0 for new_node_path", "20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths)", "self.prev_values = {} for np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np,", "prev_pos = self.old_positions[node_path] # Only letting it set text once,", "None: self.setText(\"Removed exec input for {}\".format(self.node_path)) return self.setText(\"Set {} exec", "not marked as 
effected in the model we mark it", "nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if node", "model, layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr = True self.setText(\"Add", "def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint", "self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles", "[] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes = []", "1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str)))", "model self.stage = model.stage self.node_paths = node_paths # resulting nodes", "the class attr `_first_effected_by_redo` to True. :param layer_path: string of", "if we undo an action after saving it. :param layer_just_saved:", "save action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class", "dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path", "layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self, node_path, name, model,", "__init__(self, node_path, model, layer_path): comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path)", "1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data", "self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand):", "self.model.lookup_layer(self.layer_path) 
self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value", "value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled", "display_node is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order =", "multiple attr's data. That way duplicate can just be a", "True self.setText(\"Add {} attr to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete", "undo(self): for node_path in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n", "self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance", "= layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node] node_hierarchy_data[top_node_path]", "layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self):", "the execution start point\"\"\" def __init__(self, node_path, value, model, layer_path):", "__init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model =", "an overrides. 
:param color: string of new layer alias (name)", "in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos =", "self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit()", "in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name prev_name =", "where_were_at - 1)) if cur_cmd is self: return if layer_just_saved", "want to fix names because we know this node should", "layer) self.node_data = {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break':", "old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self):", "old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is not", "self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for", "= None self.model = model self.stage = model.stage self.insert_idx =", "= False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try:", "= user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear", "class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def __init__(self, file_path, idx, model,", "self).__init__(node_path, attr_name, None, model, layer_path) # Get the data to", "node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value", "layer_path): super(AddNode, self).__init__(model) self.name = name self.data = data self.parent_path", "data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if 
META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE)", "nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS,", "self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self, node_path, model, layer_path):", "layer is not a top layer the top layer store", "= self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any created nodes", "attr_name, data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self,", "common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] #", "False self.prev_data = {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value", "node_paths self.model = model self.stage = model.stage self.prev_selection = self.model.selection", "getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer)", "[] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def", "= getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer,", "self.model.lookup_layer(self.layer_path) dirty_nodes = [] # delete any created nodes for", "= None layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors", "_get_effects(self, layer_path): \"\"\"Gets the effected state for a given layer", "layer path and the comp layer. 
It is important to", "self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) #", "node: continue data = all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order']", "value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path) class", "= layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to {}\".format(layer.filepath,", "new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time = str(int(round((time.time() -", "self.setText(\"Set {} to {}\".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData):", "SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model): \"\"\"Sets the color for", "self.model.selection self.prev_node_data = {} self.created_node_paths = [] @processing def undo(self):", "+= [new_node_path] # self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path,", "def __init__(self, node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False)", "= pos or [0.0, 0.0] self.prev_selection = self.model.selection # resulting", "self).__init__() self.node_paths = node_paths self.value = value self.model = model", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color = layer.get_color(local=True) layer.color", "the breakpoints for a given layer\"\"\" def __init__(self, model, layer_path):", "layer is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) 
else:", "action after saving it. :param layer_just_saved: string of layer real", "\"\"\"Localize nodes\"\"\" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths =", "new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos,", "self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand):", "node = layer.lookup(self.node_path) # get node info parent = getattr(node,", "self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path,", "layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if", "+= dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes)))", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_soloed(local=True)", "delta. 
x_delta = pos[0] - prev_pos[0] y_delta = pos[1] -", "node data for all child nodes for undo self.prev_node_data =", "layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self):", "node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE", "INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in", "should make another base command class that can be used", "import copy import logging import time # External from Qt.QtWidgets", "n is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties +=", "[]) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep =", "Only letting it set text once, relying on consistent delta.", "name self.data = data self.parent_path = parent_path self.layer_path = layer_path", "to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\"", "self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage", "layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path):", "def undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp", "= attr_name self.new_attr_name = new_attr_name self.model = model self.stage =", "layer_path): comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node,", "self.node_data = {'parent': parent, 'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': 
is_break}", "node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name,", "closest_ancestor[0] else: closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path:", "previous node data for all child nodes for undo self.prev_node_data", "+= result if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs", "0 for new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state =", "idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) ==", "def __init__(self, color, layer_path, model): \"\"\"Sets the color for a", "self.model.effected_layers.add(layer_path) else: # Layer was saved and then undo was", "self.model.selection = self.prev_paths def redo(self): self.model.selection = self.new_paths self.setText('Set selection:", "= self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False)", "source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in", "def __init__(self, node_path, attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT):", "eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers if", "super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(AddAttribute, self).redo()", "def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled", "self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = 
self.prev_selection # undo_debug(self, start) @processing def", "self.node_paths = node_paths self.value = value self.model = model self.stage", "{} self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = []", "model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self):", "SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\" def __init__(self, node_path, child_order, model,", "if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias", "redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing", "in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if", "False # Tells the delete command not to re-comp self.created_node_paths", "file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None", "self.setText(\"Localize instance path to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path,", "placed visually beside it's # original top node. 
node_hierarchy_data =", "# set attribute value this also adds the attribute if", "model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path to", "in self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path) if not", "__init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model,", "self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node:", "{} @processing def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo", "self.model = model self.layer_path = layer_path self.prev_breaks = [] @processing", "self.node_paths = node_paths # resulting nodes self.node_path_data = {} self.new_node_paths", "from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self, node_path,", "[] if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors =", "= nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path,", "node = layer.lookup(node_path) if node is not None: self.stage.delete_node(node, layer)", "nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict in", "'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes", "get layer data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start", "= model.stage self.layer_path = layer_path self.created_node_paths = [] 
self.remove_attr =", "= all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {})", "[self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def redo(self): layer", "= [] new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty", "muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing layer\"\"\" def __init__(self,", "= alias self.old_alias = '' self.model = model self.stage =", "wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self,", "dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection =", "now we can only set one attr's data at a", "super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name self.attr_name =", "self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx", "self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to", "self.layer_path = layer_path self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path]", "[] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create the node", "can only set one attr's data at a # time", "DeleteNode commands in a command macro. 
The list will be", "def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def", "from the model's set of effected (unsaved) layers. If the", "self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes and", "@processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self):", "data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos #", "self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path,", "self.alias = alias self.old_alias = '' self.model = model self.stage", "comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']:", "resulting nodes self.new_node_paths = [] @processing def undo(self): target_layer =", "+ \" | \" + update_time + \"ms\") def redo_debug(cmd,", "for node_path in self.created_node_paths: node = layer.lookup(node_path) if node is", "parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name = name self.data", "comp.get_node_dirties(self.node_path) changed_attrs = () for dirty in dirties: attr_path =", "a command macro. 
The list will be mutated by the", ":return: (bool, bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo = False", "except KeyError: pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass", "now its been saved & the undo effects it eff_by_redo", "code_lines, model, layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute on", "self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self, node_paths, parent_node_path,", "new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing", "__init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path)", "model): \"\"\"Sets the color for a given layer, if the", "is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n,", "not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath,", "parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model, layer_path) def undo(self):", "bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo = False first_eff_by_redo =", "layer_path self.model = model self.layer_paths = [] def undo(self): self.toggle_state()", "exec input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model,", "model, layer_path): super(SetNodesPosition, 
self).__init__(model) self.model = model self.layer_path = layer_path", "upon! :param node_path: String of node path :param model: StageModel", "\"\"\"Toggles soloing an existing layer\"\"\" def __init__(self, layer_path, model): super(SoloToggleLayer,", "[] new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty =", "try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo =", "value self.model = model self.stage = model.stage self.layer_path = layer_path", "to {}\".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def", "self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths = []", "model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self):", "= self.model.selection # resulting nodes self.new_node_paths = [] @processing def", "def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes", "_add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1] self.created_node_paths += new_paths #", "needs to get local + INTERNAL number of attrs. super(DuplicateNodes,", "given layer\"\"\" def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model =", "get node selection for undo self.prev_selection = self.model.selection # get", "= self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set", "info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break", "layer is saved. 
:param layer_path: string of layer real path", "model self.layer_path = layer_path self.prev_breaks = [] @processing def undo(self):", "redo... self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is not None:", "fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path,", "as saved we reset the class attr `_first_effected_by_redo` to False.", "nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if n", "node_path self.node_data = {} self.others = other_removed_nodes @processing def undo(self):", "if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore", "# add new node path to the list and emit", "self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is not None: self.model._set_attr_display_state(new_node_path,", "self.node_paths[0] else: path_str = str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path))", "self.value = value self.model = model self.layer_path = layer_path @processing", "if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing", "= self.model.comp_layer.lookup(node_path) if not display_node: continue # add node if", "parent_layer, \"filepath\": self.file_path, \"real_path\": self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data,", "self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing layer\"\"\" def __init__(self,", "KeyError: pass try: first_eff_by_redo = 
self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return", "self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = [] nodes, dirty =", "and the comp layer. It is important to note that", "= node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node =", "chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in", "- start) * 1000))) logger.debug(\"Undo \" + cmd.text() + \"", "parent_path child_order = [] if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER)", "path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer", "mark it as such by setting the class attr `_first_effected_by_redo`", "self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def", "= _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection =", "self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str", "= True eff_by_undo = False else: # Now the undo", "value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value =", "if undo is called layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path)", "get undo data self.prev_selection = self.model.selection # resulting nodes self.new_node_paths", "{}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self, node_paths, parent_node_path, model):", "get node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node,", "user_dir.breakpoints.pop(self.layer_path) 
self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node", "attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data", "eff_by_redo = True eff_by_undo = False else: # Now the", "visually beside it's # original top node. node_hierarchy_data = {}", "ways. :param layer_path: string of layer real path :return: (bool,", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data", "if top_node is None: top_node = node top_node_path = layer.get_node_path(top_node)", "not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data)", "ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints for a given layer\"\"\" def", "layer.lookup(node_path) if node is not None: _, dirty = self.stage.delete_node(node,", "layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()):", "def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color", "= [pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer)", "False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo", "layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand):", "self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme: The", "__init__(self, node_path, model, 
layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path)", "super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage = model.stage # get", "self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path)", "self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path)", "layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to {}\".format(layer.filepath, self.alias))", "self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines each build once", "ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer =", "node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path)", "attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state) # set position for", "== INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in INTERNAL_ATTRS.ALL: dirties", "has_parent and new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos =", "self.created_node_paths: node = layer.lookup(node_path) if node is not None: _,", "for old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node =", "in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model, layer_path) class", "real path :return: None 
\"\"\" layer_unsaved = layer_path in self.model.effected_layers", "# Set redo to False since now its been saved", "def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path) def redo(self):", "here to insure attr is deleted self.remove_attr = True super(DeleteAttribute,", "layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state)", "= [] layer = self.model.target_layer for node_path in self.node_paths: node_data", "delete any created nodes for node_path in self.created_node_paths: node =", "{} to {}\".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def", "recomp if layer.descendants(self.node_path): self.recomp = True created_node = True self.created_node_paths", "# get current node hierarchy information for each node. each", "str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue):", "[] node = layer.lookup(self.node_path) # get node info parent =", "self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert", "undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): #", "other_removed_nodes @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer", "a # net zero effect on the layer try: self.model.effected_layers.remove(layer_path)", "None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not", "self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) 
self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers)", "time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties =", "def __init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model,", "self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection = [self.node_path] if", "class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH,", "super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo()", "self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True)", "self.created_node_paths = [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) # Re-create", "nodes self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = []", "__init__(self, node_path, attr_name, comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment}", "from nxt import nxt_io from nxt import GRID_SIZE import nxt_editor", "if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer", "False first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError:", "layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) 
self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to {}\".format(layer.filepath, self.color))", "= nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path,", "used to # set multiple attr's data. That way duplicate", "for node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore", "= model self.prev_paths = self.model.selection def undo(self): self.model.selection = self.prev_paths", "compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self, node_path,", "self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed", "[self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection", "not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if", "= layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name,", "node, this behavior is depended upon! 
:param node_path: String of", "# add node if it doesn't exist on the target", "where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if", "self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] =", "self.data.get(META_ATTRS.VALUE) if val is None: self.setText(\"Removed exec input for {}\".format(self.node_path))", "is deleted self.remove_attr = True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path)", "= file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer", "model, layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path) parent_path =", "= layer_path self.stage = model.stage # command data self.pos =", "= node_paths # resulting nodes self.node_path_data = {} self.new_node_paths =", "nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name prev_name = node_data['name']", "un-parented each node can be placed visually beside it's #", "getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display data", "str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to {}\".format(attr_path, val)) # redo_debug(self, start) class", "= False # Tells the delete command not to re-comp", "{nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _, dirties = self.stage.add_node(name=name,", "self.setText(\"Changed compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def", "RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def __init__(self, layer_path, model): idx =", "node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) 
self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path)", "super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value = value self.model =", "self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self):", "was saved again. eff_by_redo = True eff_by_undo = False else:", "len(self.new_positions) == 1: nodes_str = node_path else: nodes_str = 'nodes'", "INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name:", "layer has been saved and the undo queue # was", "that will be deleted in this event loop. \"\"\" super(DeleteNode,", "top node. 
node_hierarchy_data = {} if self.parent_node_path is nxt_path.WORLD: for", "self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize", "self.parent_node_path is nxt_path.WORLD: for node_path in self.node_paths: node = layer.lookup(node_path)", "path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText(\"Parent {} to", "attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp)", "not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if", "in self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is not None:", "self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path =", "node_path in self.node_paths: node = source_layer.lookup(node_path) # duplicate node new,", "redo(self): self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path)", "[] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer,", "self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def", "effected state for a given layer with context to this", "the redo eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved] =", "= getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors = layer.ancestors(node_path) if ancestors: ancestor", "not None: self.model._set_attr_display_state(new_node_path, attr_state) # set position for un-parent if", "class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing layer\"\"\" def __init__(self, 
layer_path,", "False else: # Now the undo of this command effects", "SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\" def __init__(self, node_path, value, model,", "= True self.setText(\"Add {} attr to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute):", "self.nice_attr_name) self.setText(\"Changed comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node code", "self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent'] # We don't", "# preserve original data node_data['data'] = get_node_as_dict(target_node) # localize source", "= {} if self.parent_node_path is nxt_path.WORLD: for node_path in self.node_paths:", "+= (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE))", "comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment on", "not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else:", "node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes += [node]", "'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0]", "layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState,", "pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None layer", "__init__(self, node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath,", "color.lower() open_layer_colors += [color] layer_color = layer_color_index[0] for c 
in", "layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes", "layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias =", "'name': name, 'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if", "saved and the undo queue # was moved to an", "True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path] def", "self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH:", "INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute", "= model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def _get_effects(self, layer_path): \"\"\"Gets", "self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\"", "[] self.prev_breaks = {} self.node_path = node_path self.node_data = {}", "self.prev_selection = self.model.selection # resulting node self.node_path = None self.created_node_paths", "remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data", "self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self, node_positions,", "__init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths", "class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this graph\"\"\" def __init__(self, node_paths,", "= 
self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()):", "if not node_hierarchy_data: return # parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path,", "self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False self.created_node_paths =", "self.model = model self.stage = model.stage self.layer_path = layer_path self.prev_values", "(self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else:", "layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added", "self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer", "if n is not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo()", "self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path)", "= [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths:", "pos # This might be a bug? We don't touch", "# they are un-parented each node can be placed visually", "is not None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes", "is self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state", "saved again. 
eff_by_redo = True eff_by_undo = False else: #", "redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue):", "= str(int(round((time.time() - start) * 1000))) logger.debug(cmd.text() + \" |", "self.model = model self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data", "self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {} for old_path, node_data", "state\"\"\" def __init__(self, node_path, value, model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED,", "self.stage = model.stage # command data self.pos = pos or", "= True else: rm_layer_data = False for p in self.others[:]:", "new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path,", "not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] =", "[] self.created_node_paths = [] nodes = [] layer = self.model.target_layer", "def __init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path", "of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants = descendants", "= layer.lookup(node_path) if node is not None: _, dirty =", "self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr", "= getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data", "redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model,", "layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state)", "(bool, bool) | (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo = False first_eff_by_redo", "= [] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing", "avoid the need for a recomp if layer.descendants(self.node_path): self.recomp =", "nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(src_name, model.comp_layer, parent_path=parent_path) new_path", "to False. 
This makes sure the layer is properly marked", "[]) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\")", "# Fixme: Does not account for rebuilding proxy nodes for", "compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def __init__(self,", "node_path else: nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path)", "update_time = str(int(round((time.time() - start) * 1000))) logger.debug(\"Undo \" +", "self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data pos = self.node_data.get('pos')", "class that can be used to # set multiple attr's", "= target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set position", "self.created_node_paths = [] layer = self.model.target_layer for node_path in self.node_paths:", "empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths", "layer) def undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def", "self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties", "the layer has been saved and the undo queue #", "is depended upon! 
:param node_path: String of node path :param", "if layer_saved: eff_by_undo = True # Set redo to False", "node execute sources\"\"\" def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources,", "= [] def undo(self): self.toggle_state() for layer_path in self.layer_paths: self.undo_effected_layer(layer_path)", "self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a save action pass", "INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data", "attr_name, None, model, layer_path) # Get the data to be", "layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts", "path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Collapsed", "SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\" def __init__(self, node_path, exec_source, model,", "class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\" def __init__(self, node_path, child_order,", "INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path]", "self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer", "parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer,", "eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand): \"\"\"Add a node to the", "This command has already been marked as undo effects the", "self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer", "execution start point\"\"\" def 
__init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint,", "ancestors: ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor)", "super(SetNodeInstance, self).redo() txt = (\"Set inst path on \" \"{}", "node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path)", "be used to # set multiple attr's data. That way", "redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename", "deleted in this event loop. \"\"\" super(DeleteNode, self).__init__(model) self.layer_path =", "AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to a node.\"\"\" def __init__(self, node_path,", "model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute on {}\".format(self.node_path))", "paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def", "layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path]", "self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths = []", "it. 
:param layer_just_saved: string of layer real path :return: None", "self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks = {} self.node_path", "parent common_parent_nodes = {} for old_path, node_data in self.prev_node_data.items(): prev_parent_path", "self.model.selection = self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths", "else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if", "data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self, node_path,", "self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError:", "parent_path = getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer)", "def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <=", ":return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index()", "is not None: if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path)", "not has_parent and new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos", "data self.prev_selection = self.model.selection # resulting nodes self.new_node_paths = []", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_muted(local=True)", "# restore position if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos']", "action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo, 'redo': eff_by_redo} class AddNode(NxtCommand):", "closest_ancestor = None closest_ancestor_path = layer.get_node_path(closest_ancestor) if 
closest_ancestor_path: ancestor_child_order =", "this behavior is depended upon! :param node_path: String of node", "= False self.prev_data = {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP", "# Now the undo of this command effects the layer", "= parent_path child_order = [] if parent_node: child_order = getattr(parent_node,", "important to note that the other_removed_nodes list must be shared", "super(RenameNode, self).redo() self.node_path = self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path,", "undo data self.prev_selection = self.model.selection # resulting nodes self.new_node_paths =", "node_paths, descendants, model, source_layer_path, target_layer_path): # TODO: We should make", "attr_name self.data = data self.stage = model.stage self.layer_path = layer_path", "resulting node self.node_path = None self.created_node_paths = [] @processing def", "pos = self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1] +", "hierarchy information for each node. each node # path is", "super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path) # Get the data", "= self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order =", "dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path)", "layer\"\"\" def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path", "can just be a # setattr. 
The way it works", "# Remove our created empty nodes for node_path in self.created_node_paths:", "on the layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by", "exec_path, model, layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input", "_add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer = model.comp_layer new_node_paths", "+= [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data: return #", "self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer", "layers. If this command was the first to effect the", "comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer)", "remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection", "INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute", "layer = self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint else:", "else: parent_layer = None layer_color_index = [str(k.name()) for k in", "for node_path in self.node_paths: node = layer.lookup(node_path) name = getattr(node,", "(GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo()", "need for a recomp if layer.descendants(self.node_path): self.recomp = True created_node", "= model self.stage = model.stage self.layer_path = layer_path self.prev_values =", "def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class 
LocalizeNodes(NxtCommand):", "self.model.comp_layer self.remove_attr = False self.created_node_paths = [] # get the", "chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model = model self.stage", "= self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr = False self.created_node_paths", "in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed", "model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self):", "layer = self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos,", "class SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\" def __init__(self, node_path, code_lines,", "self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds", "if it doesn't exist on the target layer target_node =", "instance path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model,", "super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self): super(RevertCompute, self).redo()", "__init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model,", "self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos = new_pos prev_pos", "result if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs =", "file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer =", 
"else: self.setText(\"Remove breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the", "been saved and the undo queue # was moved to", "model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self):", "1: nodes_str = self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str))", "for undo self.prev_selection = self.model.selection # get previous node data", "top node so when # they are un-parented each node", "= False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: #", "self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand):", "ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does not account for", "+= (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths", "+= comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths", "for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not", "comp = self.model.comp_layer dirties = [self.node_path] # delete any created", "__init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): # TODO: We should", "source_layer_path, target_layer_path): # TODO: We should make another base command", "= model.stage # get undo data self.prev_selection = self.model.selection #", "self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer =", "layer at the layer path and the comp layer. 
It", "self.model.comp_layer.lookup(node_path) if not display_node: continue # add node if it", "name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position", "{} display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue # add", "layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo()", "super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo()", "== 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize", "layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else:", "(unsaved) layers. If the layer is not marked as effected", "path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value: self.setText(\"Add", "if display_node is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order", "Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path)", "not None: self.stage.delete_node(n, layer, remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection =", "+= self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {}", "super(SetNodeChildOrder, self).redo() self.setText(\"Change child order on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set", "= top_node_descendant_list if not node_hierarchy_data: return # parent self.node_path_data =", "attr_path 
= nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path =", "GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self):", "compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path):", "to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def __init__(self,", "+= comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name, layer=layer, create=False, comp_layer=comp,", "sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer =", "self.created_node_paths = [] dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name,", ":param layer_path: real path of layer :param model: StageModel \"\"\"", "self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing", "called, thus this redo has a # net zero effect", "else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\":", "\"\"\" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage = model.stage #", "model self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "self.prev_selection @processing def redo(self): self.prev_node_data = {} self.created_node_paths = []", "self.node_paths = node_paths self.value = value self.model = model self.layer_path", "Removed by a save action pass def undo_effected_layer(self, layer_path): \"\"\"Removes", "model self.layer_path = layer_path self.new_positions = 
node_positions self.old_positions = {}", "net zero effect on the layer try: self.model.effected_layers.remove(layer_path) except KeyError:", "layer_path self.stage = model.stage # get undo data self.prev_selection =", "model.stage self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "display_node: continue # add node if it doesn't exist on", "was the first to effect the layer we mark it", "idx = 0 for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx]", "layer_path) def redo(self): super(SetNodeInstance, self).redo() txt = (\"Set inst path", "layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path in", "nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)", "self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks))", "self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty in dirties: attr_path", "if not has_parent and new_node_path != node_path: pos = self.model.get_node_pos(node_path)", "on this graph\"\"\" def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name", "LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, attr_name, model, layer_path): node", "parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path, nodes_dict", "class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\" def __init__(self, node_path, value,", "first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass try: 
first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo']", "layer_path not in self.model.effected_layers if layer_saved: eff_by_undo = True #", "self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str = self.node_paths[0]", "idx self.file_path = file_path self.file_name = file_name self.chdir = chdir", "RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path,", "undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path", "super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value = value self.model =", "layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path,", "= model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr,", "dirty_nodes = [] node = layer.lookup(self.node_path) # get node info", "class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value, model, layer_path): data", "for each node. each node # path is placed in", "not account for rebuilding proxy nodes for the dirty nodes", "marked as effected in the model we mark it as", "self.stage = model.stage self.node_paths = node_paths # resulting nodes self.node_path_data", "class attr `_first_effected_by_redo` to True. 
:param layer_path: string of layer", "* 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1", "import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True)", "nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func):", "undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove our created empty nodes", "what it was named when it was deleted new_nodes, dirty", "node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path)", "= self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The", "comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model,", "__init__(self, node_path, name, model, layer_path): self.old_node_path = node_path layer =", "self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val", "= layer.color if color: color = color.lower() open_layer_colors += [color]", "node_path, attr_name, model, layer_path): node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node,", "layer.get_color(local=True) layer.color = self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path)", "super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths =", "this command. 
Since a single command can effect layers in", "False comp_layer = self.model.comp_layer if node is not None: #", "__init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths = paths self.model =", "def __init__(self, node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE:", "super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias = alias self.old_alias =", "self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\" def __init__(self, paths,", "not display_node: continue # add node if it doesn't exist", "INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) #", "INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance, self).redo() txt =", "consistent delta. x_delta = pos[0] - prev_pos[0] y_delta = pos[1]", "layers in different ways. :param layer_path: string of layer real", "model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True)", "the undo queue # was moved to an index before", "super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants = descendants self.source_layer_path =", "with context to this command. 
Since a single command can", "y_delta = pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta)", "func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if", "self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node", "we know this node should be # named what it", "nodes\"\"\" def __init__(self, node_path, attr_name, model, layer_path): node = model.comp_layer.lookup(node_path)", "node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name =", "value\"\"\" def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE,", "SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing layer\"\"\" def __init__(self, layer_path, model):", "def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing", "| (first_effected_by_undo, first_effected_by_redo) \"\"\" first_eff_by_undo = False first_eff_by_redo = False", "StageModel :param layer_path: String of layer realpath :param other_removed_nodes: list", "in this event loop. 
\"\"\" super(DeleteNode, self).__init__(model) self.layer_path = layer_path", "String of node path :param model: StageModel :param layer_path: String", "exec input on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def __init__(self,", "self).__init__(model) self.node_paths = node_paths self.descendants = descendants self.source_layer_path = source_layer_path", "\"\"\"Clear all the breakpoints for a given layer\"\"\" def __init__(self,", "= len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer =", "existing layer\"\"\" def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model)", "self).__init__(model) self.new_layer_path = None self.model = model self.stage = model.stage", "node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value", "parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name }", "= model.stage comp_layer = model.comp_layer new_node_paths = [] new_nodes =", "self.attr_name = attr_name self.new_attr_name = new_attr_name self.model = model self.stage", "= self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index", "self.setText(\"Remove breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints", "= True if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer,", "self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection =", "layer\"\"\" def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path,", "instance path to {}\".format(self.node_path)) class 
RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model,", "= self.model.lookup_layer(self.layer_path) for node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer)", "self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes", "undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer)", "layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str = self.node_paths[0] else:", "self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data in", "self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node", "remove_layer_data=False) dirty_nodes += dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node)", "layer) node_data = self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] =", "local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs: if attr", "layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name)", "self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes", "if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp,", 
"self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers)", "would avoid the need for a recomp if layer.descendants(self.node_path): self.recomp", "node_data nodes += [node] # get current node hierarchy information", "dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes)))", "self.model.top_layer: state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state =", "layer real path :return: None \"\"\" layer_unsaved = layer_path in", "@processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers:", "len(self.node_paths) == 1: nodes_str = self.node_paths[0] else: nodes_str = 'nodes'", "= model self.stage = model.stage self.node_paths = node_paths # resulting", "= self.model.selection self.prev_starts = [] self.prev_breaks = {} self.node_path =", "self.model.selection self.prev_starts = [] self.prev_breaks = {} self.node_path = node_path", "node is not None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False)", "= layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection", "= self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos, layer)", "undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent']", "model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment on {}\".format(self.node_path))", 
"new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node in new: #", "+ \"ms\") def redo_debug(cmd, start): update_time = str(int(round((time.time() - start)", "eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: #", "deletes node, this behavior is depended upon! :param node_path: String", "self.setText(\"Add {} attr to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute", "each node can be placed visually beside it's # original", "data self.prev_selection = self.model.selection self.prev_starts = [] self.prev_breaks = {}", "That way duplicate can just be a # setattr. The", "model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self): super(LocalizeExecPath,", "layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model,", "parent = self.node_data['parent'] # We don't want to fix names", "else: path_str = str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded", "self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an", "for c in layer_color_index: if c not in open_layer_colors: layer_color", "self.prev_selection = self.model.selection # resulting nodes self.new_node_paths = [] @processing", "[new_node_path] # self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path, target_layer)", "self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def", 
"self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node = source_layer.lookup(node_path) #", "1000))) logger.debug(cmd.text() + \" | \" + update_time + \"ms\")", "source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data = False", "def __init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None,", "def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model =", "self.color else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {}", "self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path)", "+ INTERNAL number of attrs. super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths", "redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue):", "layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for", "= self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for node_path in self.new_node_paths:", "import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node import", "attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name =", "layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name)", "self.remove_attr = True self.setText(\"Add {} attr to {}\".format(self.attr_name, self.node_path)) class", "tuple(set(dirty)) 
self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing", "self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if", "is not None: self.model._set_attr_display_state(new_node_path, attr_state) # set position for un-parent", "def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes =", "= nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path,", "None: self.stage.delete_node(node, layer) idx = 0 for old_node_path in self.node_paths:", "[] self.remove_attr = False self.prev_data = {} self.recomp = attr_name", "Built-in import copy import logging import time # External from", "or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = ()", "\" \"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node", "else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse state\"\"\"", "first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo def", "@processing def redo(self): new_selection = [] self.new_node_paths = [] source_layer", "node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model = model", "ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if new_path", "= [] self.new_node_paths = [] source_layer = 
self.model.lookup_layer(self.source_layer_path) target_layer =", "new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is not None: display_child_order", "in colors.LAYER_COLORS] open_layer_colors = [] for layer in self.stage._sub_layers: color", "parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx", "new node path to the list and emit model signal", "in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer =", "{}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths = paths", "of layer :param model: StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path =", "to # set multiple attr's data. That way duplicate can", "on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self, node_path, attr_name,", "nxt_io from nxt import GRID_SIZE import nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME)", "new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme:", "else: path_str = str(self.node_paths) if self.value: self.setText(\"Add breakpoint to {}\".format(path_str))", "self.target_layer_path = target_layer_path self.stage = model.stage # get undo data", "in self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and", "model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change child order on", "layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name,", "real path of layer :param model: StageModel \"\"\" super(SetLayerColor, self).__init__(model)", 
"new: # add new node path to the list and", "effected in the model we mark it as effected. This", "__init__(self, node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN,", "# set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent", "self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 <", "# parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values())", "called layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node,", "self.stage.delete_node(node, layer) idx = 0 for old_node_path in self.node_paths: new_node_path", "be mutated by the stage as it deletes node, this", "model self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data = {}", "= tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # undo_debug(self,", "nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME,", "self.created_node_paths += new_paths # self.model.node_added.emit(node_path) # preserve original data node_data['data']", "__init__(self, node_path, attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value}", "not None: self.stage.delete_node(node, layer) idx = 0 for old_node_path in", "source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path = nxt_path.get_parent_path(node_path) new_name =", "layer_path, model): \"\"\"Sets the color for a given layer, if", "= all_data['data'] child_order = 
all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes']", "= getattr(node, INTERNAL_ATTRS.PARENT_PATH) name = getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path,", "else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path,", "list and emit model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths +=", "def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def", "a layer as saved we reset the class attr `_first_effected_by_redo`", "super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self,", "zero effect on the layer try: self.model.effected_layers.remove(layer_path) except KeyError: #", "layer realpath :param other_removed_nodes: list of node paths that will", "data self.pos = pos or [0.0, 0.0] self.prev_selection = self.model.selection", "model.stage self.node_paths = node_paths # resulting nodes self.node_path_data = {}", "node is not None: self.stage.delete_node(node, layer) idx = 0 for", "dirty dirty_nodes += self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection", "model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute on {}\".format(self.node_path))", "path_str = str(self.node_paths) if self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str))", "idx=self.insert_idx) # Fixme: The next 2 lines each build once", "self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): 
self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if", "layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: Targeted parenting would avoid the", "is placed in a list of descendants for each top", "state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles", "the layer is not a top layer the top layer", "self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data", "2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference", "pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions)", "start): update_time = str(int(round((time.time() - start) * 1000))) logger.debug(\"Undo \"", "# TODO: We should make another base command class that", "behavior is depended upon! :param node_path: String of node path", "Removed by a save action pass self._layers_effected_by_me[layer_path] = {'undo': eff_by_undo,", "# This might be a bug? 
We don't touch the", "node self.node_path = None self.created_node_paths = [] @processing def undo(self):", "node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path, others) self.rebuild", "== INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL: dirties", "layer.lookup(parent_path) ancestor_path = parent_path child_order = [] if parent_node: child_order", "self).__init__(layer_path, idx, model, None) self.text = \"Removed reference to {}\".format(layer_path)", "this graph\"\"\" def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name =", "self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "= {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER)", "node _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes +=", "prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node]", "except KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When", "we mark it as effected. 
This case happens when undo", "self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on", "others): super(RevertNode, self).__init__(node_path, model, layer_path, others) self.rebuild = False #", "apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for attr in local_attrs:", "if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path)", "= self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo() layer =", "start) * 1000))) logger.debug(\"Undo \" + cmd.text() + \" |", "dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection =", "self.stage = model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if", "layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo':", "[] # get the node node = layer.lookup(self.node_path) dirties =", "self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths", "model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data,", "self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove attr here to", "if layer.descendants(self.node_path): self.recomp = True created_node = True self.created_node_paths +=", "= (\"Set inst path on \" \"{} to 
{}\".format(self.node_path, self.data.get(META_ATTRS.VALUE)))", "layer with context to this command. Since a single command", "the layer we mark it as such by setting the", "= layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data:", "LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path): comp_layer =", "self.setText(\"Localize compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model,", "base command class that can be used to # set", "= {} self.created_node_paths = [] layer = self.model.target_layer for node_path", "None: top_node = node top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node,", "\"\"\"Set node execute sources\"\"\" def __init__(self, node_path, exec_source, model, layer_path):", "self.new_node_paths = [] self.created_node_paths = [] # get node selection", "_, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node", "a recomp if layer.descendants(self.node_path): self.recomp = True created_node = True", "= model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path)", "self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if", "1000))) logger.debug(\"Undo \" + cmd.text() + \" | \" +", "position if self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer =", "= model.selection new_paths = curr_selection + paths super(AddSelection, self).__init__(new_paths, model)", "self.setText(\"Set {} exec input to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set", "in 
self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state", "self.attr_name, layer) def undo(self): super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path)", "class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\" def __init__(self, layer_path, model): idx", "= descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path self.stage =", "is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple =", "LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from", "def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment", "self.prev_values = {} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path)", "original data node_data['data'] = get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node,", "def redo(self): super(SetNodeInstance, self).redo() txt = (\"Set inst path on", "undo self.prev_node_data = {} @processing def undo(self): layer = self.model.target_layer", "dirty + [self.node_path] if self.node_path in self.model.selection: fix_selection = self.model.selection[:]", "class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, attr_name, model, layer_path):", "__init__(self, node_path, model, layer_path, other_removed_nodes): \"\"\"Delete node from the layer", "parent_path=parent_path, layer=layer) for parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path", "+ self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # 
undo_debug(self, start) @processing", "prev_pos[1] delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1:", "node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def", "func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str = self.node_paths[0]", "[ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes += [node] # get", "self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str =", "add node if it doesn't exist on the target layer", "self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str =", "existing layer\"\"\" def __init__(self, layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer,", "__init__(self, file_path, file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path =", "= not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False)", "self.new_layer_path = None self.model = model self.stage = model.stage self.insert_idx", "the first to effect the layer we mark it as", "class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def __init__(self, node_path, comment, model,", "_, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty", "The list will be mutated by the stage as it", "on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): exec_path", "= {} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for", "= ancestors[0] ancestor_path = 
layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] =", "if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs", "= layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] =", "self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in INTERNAL_ATTRS.ALL:", "layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color = self.old_color", "self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a node\"\"\" def __init__(self,", "time # External from Qt.QtWidgets import QUndoCommand # Internal from", "in paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model)", "node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer)", "model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model, layer_path) def redo(self):", "self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse state\"\"\" def", "self).__init__(model) self.node_path = node_path self.attr_name = attr_name self.new_attr_name = new_attr_name", "model, layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance path on", "self.node_paths: node_data = {} display_node = self.model.comp_layer.lookup(node_path) if not display_node:", "the need for a recomp if layer.descendants(self.node_path): self.recomp = True", "self.redo_effected_layer(layer.real_path) self.setText('Added node: 
{}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model,", "on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertExecPath,", "undo effects the # layer, meaning the layer has been", "self.node_path = node_path self.attr_name = attr_name self.new_attr_name = new_attr_name self.model", "layer_path, model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model", "self.insert_idx = idx self.file_path = file_path self.file_name = file_name self.chdir", "self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths) == 1: path_str =", "= self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name,", "idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self):", "import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io from", "first_effected_by_redo) \"\"\" first_eff_by_undo = False first_eff_by_redo = False try: first_eff_by_undo", "= [] # delete any created nodes for node_path in", "self.model.comp_layer dirties = [self.node_path] # delete any created nodes for", "parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table: display_node =", "self.model = model self.stage = model.stage self.node_paths = node_paths #", "{} self.new_node_paths = [] self.created_node_paths = [] nodes = []", "if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path,", "doesn't exist on the target layer target_node = self.model.target_layer.lookup(node_path) if", "not layer.get_muted(local=False) 
self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias()))", "else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)):", "def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment,", "self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path) # set position has_parent =", "self.nice_attr_name = attr_name self.attr_name = attr_name self.data = data self.stage", "INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer", "& the undo effects it eff_by_redo = False self.model.effected_layers.add(layer_path) elif", "= self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path", "else: changed_attrs = () for dirty in dirties: attr_path =", "= idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing", "a given layer\"\"\" def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model", "super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo()", "self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing layer\"\"\"", "= self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data)", "class SetNodeAttributeData(NxtCommand): 
\"\"\"Set attribute value\"\"\" def __init__(self, node_path, attr_name, data,", "breakpoints for a given layer\"\"\" def __init__(self, model, layer_path): super(ClearBreakpoints,", "() for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs", "pos = [pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos,", "INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty in dirties:", "command effects the layer not the redo eff_by_redo = False", "restore name prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if", "be set if undo is called layer = self.model.lookup_layer(self.layer_path) node", "self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH,", "n = target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, target_layer,", "def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for", "build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand):", "setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes += [n] return", "node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process", "layer_path self.stage = model.stage # command data self.pos = pos", "= self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data = True else:", "model, layer_path): super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self):", "{}\".format(path_str, 
self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to a node.\"\"\"", "list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict in common_parent_nodes.items():", "[color] layer_color = layer_color_index[0] for c in layer_color_index: if c", "self.text = \"Removed reference to {}\".format(layer_path) @processing def undo(self): super(RemoveLayer,", "once, relying on consistent delta. x_delta = pos[0] - prev_pos[0]", "layer) self.node_data['start'] = is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path,", "layer store an overrides. :param color: string of new layer", "comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or", "redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count:", "target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection", "Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path)", "model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self, node_path, name,", "\"real_path\": self.real_path, \"alias\": layer_data['name'] } layer_data.update(extra_data) self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) # Fixme:", "self.model.target_layer.lookup(node_path) if not node: continue data = all_data['data'] child_order =", "{}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path,", "data at a # time and duplicate needs to get", "super(RevertNode, 
self).__init__(node_path, model, layer_path, others) self.rebuild = False # Tells", "breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as the execution start", "parenting would avoid the need for a recomp if layer.descendants(self.node_path):", "node_paths self.descendants = descendants self.source_layer_path = source_layer_path self.target_layer_path = target_layer_path", "Nodes\"\"\" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path =", "@processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers:", "redo(self): # Overload remove attr here to insure attr is", "# Built-in import copy import logging import time # External", "redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE): self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path))", "self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection =", "SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse state\"\"\" def __init__(self, node_paths, value,", "{} exec input to {}\".format(self.node_path, val)) class SetNodeBreakPoint(QUndoCommand): \"\"\"Set node", "descendants, model, source_layer_path, target_layer_path): # TODO: We should make another", "undo_debug(self, start) @processing def redo(self): start = time.time() created_node =", "n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer]", "= parent_path parent_node = layer.lookup(parent_path) ancestor_path = parent_path child_order =", "else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) 
self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path,", "ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model)", "return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This command", "redo(self): super(AddAttribute, self).redo() self.remove_attr = True self.setText(\"Add {} attr to", "node_path self.nice_attr_name = attr_name self.attr_name = attr_name self.data = data", "def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in", "cmd.text() + \" | \" + update_time + \"ms\") def", "source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths))", "model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path,", "self.prev_node_data = {} @processing def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path)", "self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing layer\"\"\"", "if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += self.return_value if self.attr_name in", "has been saved and the undo queue # was moved", "mark it as effected. 
This case happens when undo is", "redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val is None:", "None \"\"\" layer_unsaved = layer_path in self.model.effected_layers eff_by_undo, eff_by_redo =", "= self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path] #", "INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if self.data.get(META_ATTRS.VALUE):", "as undo effects the # layer, meaning the layer has", "self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo() if", "dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or", "nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to {}\".format(attr_path, val))", "new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH,", "= [self.node_path] # delete any created nodes for node_path in", "color = layer.color if color: color = color.lower() open_layer_colors +=", "self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp = self.model.comp_layer self.remove_attr =", "quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) # set attribute value", "layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path,", "self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display data attr_display =", "self.model.selection = self.prev_selection self.setText('Revert 
{}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def", "layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = [] nodes,", "node = source_layer.lookup(node_path) # duplicate node new, dirty = self.stage.duplicate_node(node=node,", "self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not None: self.model._set_attr_display_state(old_node_path,", "= self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self,", "@processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer,", "self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path,", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func =", "new layer\"\"\" def __init__(self, file_path, file_name, idx, model, chdir): super(NewLayer,", "self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def", "def redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection):", "to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths =", "Layer Alias\"\"\" def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path", "self.model.selection = self.prev_selection # undo_debug(self, start) @processing def redo(self): start", "reset_layer_effected(self, layer_just_saved): \"\"\"When the model marks a layer as saved", "model, layer): stage = model.stage comp_layer = model.comp_layer new_node_paths =", "model, layer_path): 
super(AddNode, self).__init__(model) self.name = name self.data = data", "and new_node_path != node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0]", "named when it was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'],", "self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def redo(self): sub_layer_count", "+ 20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection =", "def __init__(self, node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path", "layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to {}\".format(layer.filepath, self.color)) def", "from nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from", "undo queue # was moved to an index before this", "AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths = paths curr_selection =", "def __init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path", "of layer real path :return: None \"\"\" eff_by_undo, eff_by_redo =", "layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not", "{} @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path,", "first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except KeyError: pass", "self.stage.get_top_node(node, self.model.target_layer) if top_node is 
None: top_node = node top_node_path", "self.model.node_added.emit(new_node_path) # set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not", "closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order']", "layer_path): super(SetNodeExecuteSources, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources,", "nodes on this graph\"\"\" def __init__(self, node_paths, descendants, model, source_layer_path,", "path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class", "not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr = True if not created_node: self.return_value", "- 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index = [str(k.name())", "color self.old_color = '' self.model = model self.stage = model.stage", "for path in paths: try: new_selection.remove(path) except ValueError: continue super(RemoveFromSelection,", "p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node,", "= self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent common_parent_nodes = {} for", "top_node_descendant_list += [node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data: return", "nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node", "command was the first to effect the layer we mark", "attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state)", "= [self.node_path] def redo(self): 
self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path = self.return_value", "self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple", "saved we reset the class attr `_first_effected_by_redo` to False. This", "self).redo() self.setText(\"Localize exec input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def __init__(self,", "= nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData):", "= nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set", "@processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx", "if layer is self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color) self.undo_effected_layer(self.model.top_layer.real_path)", "def __init__(self, node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path,", "= model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path, layer_only=True) super(RenameNode, self).__init__(node_path, INTERNAL_ATTRS.NAME, new_name, model,", "!= nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos,", "(unsaved) layers. 
If this command was the first to effect", "node code value\"\"\" def __init__(self, node_path, code_lines, model, layer_path): super(SetCompute,", "self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or self.created_node_paths or self.attr_name in", "a list of descendants for each top node so when", "INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute", "= layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if not", "@processing def redo(self): self.prev_node_data = {} self.node_path_data = {} self.new_node_paths", "{META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self):", "user_dir.breakpoints dirty_nodes = [] node = layer.lookup(self.node_path) # get node", "INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance", "class ClearBreakpoints(QUndoCommand): \"\"\"Clear all the breakpoints for a given layer\"\"\"", "layers = [self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data =", "layer\"\"\" def __init__(self, model, layer_path): super(ClearBreakpoints, self).__init__() self.model = model", "for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path)", "has already been marked as undo effects the # layer,", "node paths that will be deleted in this event loop.", "self.attr_name = attr_name self.data = data self.stage = model.stage self.layer_path", "new_paths, dirty = _add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1] self.created_node_paths", "self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths = list(self.node_path_data.values()) idx = 0 for", 
"self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo() self.setText('Add {} to selection'.format(self.added_paths))", "def rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name,", "if not eff_by_undo and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path]", "self.model.selection = self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode, self).redo()", "super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing layer\"\"\"", "None) self.text = \"Removed reference to {}\".format(layer_path) @processing def undo(self):", "= source_layer.lookup(node_path) # duplicate node new, dirty = self.stage.duplicate_node(node=node, layer=target_layer,", "new nodes for new_node in new: # add new node", "path_str = str(self.node_paths) if self.value: self.setText(\"Add breakpoint to {}\".format(path_str)) else:", "AddNode(NxtCommand): \"\"\"Add a node to the graph\"\"\" def __init__(self, name,", "nodes on this graph\"\"\" def __init__(self, node_path, model, source_layer_path, target_layer_path):", "such by setting the class attr `_first_effected_by_redo` to True. :param", "self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__()", "else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path] + self.created_node_paths)", "created_node = False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path)", "node path to the list and emit model signal new_node_path", "@processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer:", "# Overload remove attr here to insure attr is deleted", "bug? 
We don't touch the top layer in redo... self.undo_effected_layer(self.stage.top_layer.real_path)", "= self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints", "= model.stage self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths =", "preserve original data node_data['data'] = get_node_as_dict(target_node) # localize source node", "# Attr display data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is", "INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in INTERNAL_ATTRS.ALL: dirties +=", "self.model.nodes_changed.emit(dirties) else: self.model.attrs_changed.emit(changed_attrs) if not self.recomp: changed = tuple([self.node_path] +", "class SetNodeCollapse(NxtCommand): \"\"\"Set the node collapse state\"\"\" def __init__(self, node_paths,", "data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data, model, layer_path)", "self.stage = model.stage self.layer_path = layer_path self.prev_values = {} @processing", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias)", "= 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select", "self).redo() self.setText(\"Changed compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\"", "self.setText(\"Toggle {} soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model):", "self.node_path = node_path self.node_data = {} self.others = other_removed_nodes @processing", "nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: 
self.return_value =", "= tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set)", "# layer, meaning the layer has been saved and the", "self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path)", "not to re-comp self.created_node_paths = [] self.node_path = node_path def", "func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for node_path in", "self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16,", "__init__(self, node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path, others)", "self.alias)) class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def __init__(self, file_path, file_name,", "pass try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo,", "we undo an action after saving it. 
:param layer_just_saved: string", "target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer = self.model.lookup_layer(self.layer_path) new_pos", "inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model,", "state\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths", "getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors = layer.ancestors(node_path) if ancestors: ancestor =", "user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all", "__init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path =", "len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx", "[], model, layer_path) def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute on", "= get_node_as_dict(target_node) # localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer)", "self.model = model self.layer_path = layer_path @processing def undo(self): layer", "= self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node = layer.lookup(self.node_path)", "self.new_paths = paths self.model = model self.prev_paths = self.model.selection def", "= self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer) self.model._set_node_pos(self.new_path, new_pos, layer)", "@processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer:", "@processing def undo(self): layer = 
self.model.lookup_layer(self.layer_path) if not self.value: func", "dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else:", "= [] self.remove_attr = False self.prev_data = {} self.recomp =", "class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path)", "new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path, new_pos,", "self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection # undo_debug(self, start) @processing def redo(self):", "= [] # get node selection for undo self.prev_selection =", "layer_color = layer_color_index[0] for c in layer_color_index: if c not", "of new layer alias (name) :param layer_path: real path of", "c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\": parent_layer,", "super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model = model self.stage =", ":param layer_path: String of layer realpath :param other_removed_nodes: list of", "if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name in", "layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set", "event loop. 
\"\"\" super(DeleteNode, self).__init__(model) self.layer_path = layer_path self.stage =", "is not None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True)", "execute sources\"\"\" def __init__(self, node_path, exec_source, model, layer_path): super(SetNodeExecuteSources, self).__init__(node_path,", "\"Removed reference to {}\".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text)", "node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path)", "if self.value: func = self.model._add_breakpoint else: func = self.model._remove_breakpoint for", "self.model.node_added.emit(node_path) # preserve original data node_data['data'] = get_node_as_dict(target_node) # localize", "# This command has already been marked as undo effects", "node is not None: # delete node _, dirty =", "relying on consistent delta. 
x_delta = pos[0] - prev_pos[0] y_delta", "value\"\"\" def __init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model)", "data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name =", "redo(self): start = time.time() created_node = False self.prev_selection = self.model.selection", "= model.selection @processing def undo(self): start = time.time() layer =", "class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path): comp_layer", "attribute\"\"\" def __init__(self, node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model)", "self).redo() self.setText(\"Revert compute on {}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def", "list of descendants for each top node so when #", "layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr = True self.setText(\"Add {}", "= time.time() created_node = False self.prev_selection = self.model.selection layer =", "self.setText(\"Change child order on {}\".format(self.node_path)) class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\"", "MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing layer\"\"\" def __init__(self, layer_path, model):", "node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path)", "attribute if it does not exist if not self.stage.node_attr_exists(node, self.attr_name):", "{} self.node_path = node_path self.node_data = {} self.others = other_removed_nodes", "node_path, attr_name, new_attr_name, model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path", "None, model, layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance path", "True created_node = 
True self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path)", "new_nodes = [] node_hierarchy = nxt_path.str_path_to_node_namespace(base_node_path) new_node_table, dirty = stage.add_node_hierarchy(node_hierarchy,", "save action pass def undo_effected_layer(self, layer_path): \"\"\"Removes layer from the", "= node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete", "self.prev_selection = model.selection @processing def undo(self): start = time.time() layer", "to re-comp self.created_node_paths = [] self.node_path = node_path def undo(self):", "1)) if cur_cmd is self: return if layer_just_saved in self._layers_effected_by_me:", "account for rebuilding proxy nodes for the dirty nodes dirty_set", "} new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) #", "model self.layer_paths = [] def undo(self): self.toggle_state() for layer_path in", "in redo... 
self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is not", "dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,):", "real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx)", "self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore", "each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path))", "model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths)) class", "layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer,", "self.model.effected_layers eff_by_undo, eff_by_redo = self._get_effects(layer_path) if not eff_by_undo and layer_unsaved:", "layer, if the layer is not a top layer the", "\"\"\"Gets the effected state for a given layer with context", "+= 1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) == 1:", "__init__(self, name, data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name", "was moved to an index before this command and the", "= nxt_path.full_file_expand(self.file_path, chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if", "= False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) comp", 
"self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self, node_positions, model,", "is not None: # delete node _, dirty = self.stage.delete_node(node,", "in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing", "mutated by the stage as it deletes node, this behavior", "Connections\"\"\" def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths = paths", "self.setText(\"Remove {} attr from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\"", "self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer =", "logging import time # External from Qt.QtWidgets import QUndoCommand #", "not self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection =", "self.prev_selection = self.model.selection # get previous node data for all", "self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order] self.prev_node_data[node_path] = node_data nodes +=", "= not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not layer.get_soloed(local=False)", "self.file_path = file_path self.file_name = file_name self.chdir = chdir @processing", "= list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str = self.node_paths[0] else:", "self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer", "= self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos # This might", "\"\"\"Duplicate nodes on this graph\"\"\" def __init__(self, node_paths, 
descendants, model,", "self).__init__(node_path, attr_name, data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def", "model self.prev_paths = self.model.selection def undo(self): self.model.selection = self.prev_paths def", "self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node,", "None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers = [self.model.target_layer] for node_path, all_data", "self.created_node_paths or self.attr_name in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs =", "super(SetCompute, self).redo() self.setText(\"Changed compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node", "attr_name self.attr_name = attr_name self.data = data self.stage = model.stage", "= self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, [])", "colors.LAYER_COLORS] open_layer_colors = [] for layer in self.stage._sub_layers: color =", "class SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def __init__(self, alias, layer_path, model):", "self.new_layer_path = new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines", "Does not account for rebuilding proxy nodes for the dirty", "effects the # layer, meaning the layer has been saved", "node_path, model, layer_path): super(RevertInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, None, model, layer_path) def", "from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import", "= self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), 
comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path,", "pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True)", "def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue):", "self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name } new_layer =", "soloed.\".format(layer.get_alias())) class SetLayerColor(NxtCommand): def __init__(self, color, layer_path, model): \"\"\"Sets the", "[0.0, 0.0] self.prev_selection = self.model.selection # resulting node self.node_path =", "= self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is not None:", "def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an", "self.stage = model.stage # get undo data self.prev_selection = self.model.selection", "update_time + \"ms\") def redo_debug(cmd, start): update_time = str(int(round((time.time() -", "else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes", "It is important to note that the other_removed_nodes list must", "if len(self.new_positions) == 1: nodes_str = node_path else: nodes_str =", "target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node =", "command can effect layers in different ways. :param layer_path: string", "in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) changed_attrs = () for dirty", "number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants =", "# Internal from nxt_editor import colors from nxt_editor import user_dir", "self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {} self.created_node_paths", "we mark it as such by setting the class attr", "= None self.model = model self.stage = model.stage self.node_paths =", "layer_path, model): idx = model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None)", "__init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias", "if 0 < self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx -", "else: state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,))", "self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is not None: self.model._set_attr_display_state(self.node_path,", "time.time() created_node = False self.prev_selection = self.model.selection layer = self.model.lookup_layer(self.layer_path)", "= new_attr_name self.model = model self.stage = model.stage self.layer_path =", "child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change child order", "comp_layer=comp_layer, remove_layer_data=rm_layer_data) dirty_nodes += dirty dirty_nodes += self.created_node_paths dirty_nodes +=", "layer.descendants(self.node_path): self.recomp = True created_node = True self.created_node_paths += [self.node_path]", "get current node hierarchy information for each node. 
each node", "self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data =", "deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False)", "in self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path) if not", "path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path):", "'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was saved and then", "# get the node node = layer.lookup(self.node_path) dirties = [self.node_path]", "else: path_str = str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path)) class", "value this also adds the attribute if it does not", "self.prev_data = copy.deepcopy(self.prev_data) # set attribute value this also adds", "= model self.layer_path = layer_path @processing def undo(self): layer =", "node collapse state\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse,", "* 1000))) logger.debug(cmd.text() + \" | \" + update_time +", "saved and then undo was called, thus this redo has", "[node] node_hierarchy_data[top_node_path] = top_node_descendant_list if not node_hierarchy_data: return # parent", "def redo_effected_layer(self, layer_path): \"\"\"Adds layer to the model's set of", "super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name = attr_name self.new_attr_name =", "* 1000))) logger.debug(\"Undo \" + cmd.text() + \" | \"", "self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = [] self.new_node_paths", "__init__(self, node_path, model, source_layer_path, 
target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path) parent_path =", "self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1))", "= self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer)", "{} def _get_effects(self, layer_path): \"\"\"Gets the effected state for a", "class AddNode(NxtCommand): \"\"\"Add a node to the graph\"\"\" def __init__(self,", "data, parent_path, pos, model, layer_path): super(AddNode, self).__init__(model) self.name = name", "class SetNodeInstance(SetNodeAttributeValue): \"\"\"Set node instance\"\"\" def __init__(self, node_path, instance_path, model,", "model's set of effected (unsaved) layers. If the layer is", "redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path)", "all_data['data'].get('child_order', []) apply_data['child_order'] = child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep", "top_node = node top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, [])", "= self.prev_node_data[old_path] # restore name prev_name = node_data['name'] name =", "model's set of effected (unsaved) layers. 
If this command was", "self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else:", "list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str", "is called layer = self.model.lookup_layer(self.layer_path) node = layer.lookup(self.node_path) self.data =", "The next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path)", "after a layer is saved. :param layer_path: string of layer", "__init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model,", "if not target_node: new_nodes, new_paths, dirty = _add_node_hierarchy(node_path, self.model, layer)", "self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr", "its been saved & the undo effects it eff_by_redo =", "# delete any created nodes for node_path in self.created_node_paths: n", "layer = self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0),", "to an index before this command and the same #", "= self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent Nodes\"\"\" def __init__(self,", "don't touch the top layer in redo... 
self.undo_effected_layer(self.stage.top_layer.real_path) attr_display =", "__init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model =", "created_node = True self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data", "layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self): self.setText(\"Revert", "self.name = name self.data = data self.parent_path = parent_path self.layer_path", "we can only set one attr's data at a #", "= False comp_layer = self.model.comp_layer if node is not None:", "nodes\"\"\" def __init__(self, node_path, model, layer_path): comp_layer = model.comp_layer display_node", "by other DeleteNode commands in a command macro. The list", "an empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer)", "child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor:", "adds the attribute if it does not exist if not", "layer_just_saved in self._layers_effected_by_me: if eff_by_undo: # This command has already", "node is None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if", "attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name", "child_order = child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order,", "layer = self.model.lookup_layer(self.layer_path) # Remove our created empty nodes for", "layer.lookup(self.node_path) # get node info parent = getattr(node, INTERNAL_ATTRS.PARENT_PATH) name", "if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value:", "{}\".format(self.node_path)) class 
RevertExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path,", "super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node = None self.model =", "new_child_order = list_merger(display_child_order, old_child_order) setattr(n, INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p]", "model.stage # command data self.pos = pos or [0.0, 0.0]", "self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize", "self.attr_name): self.remove_attr = True if not created_node: self.return_value = self.stage.node_setattr_data(node,", "_add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection", "for node_path in self.created_node_paths: n = layer.lookup(node_path) if n is", "func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths))", "must be shared by other DeleteNode commands in a command", "idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir) @processing def", "nxt import nxt_path from nxt.nxt_layer import LAYERS, SAVE_KEY from nxt.nxt_node", "= self.return_value self.model.selection = [self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{}", "self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE)))", "self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) 
if len(self.node_paths) == 1: path_str", "self.model = model self.stage = model.stage @processing def undo(self): layer", "= self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo()", "node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME) parent_path = getattr(node,", "layer_path) # Get the data to be set if undo", "model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name)", "model signal new_node_path = target_layer.get_node_path(new_node) self.new_node_paths += [new_node_path] # self.model.node_added.emit(new_node_path)", "self.old_positions = {} for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path)", "in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if", "def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path =", "nodes for the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if", "= self.new_path self.setText('Instanced {}'.format(self.data.get(META_ATTRS.VALUE))) class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self,", "= ancestor_tuple ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order)", "pos = new_pos prev_pos = self.old_positions[node_path] # Only letting it", "target_node = new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path) # preserve", "node_hierarchy_data: return # parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer) self.new_node_paths", "{'undo': False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was saved", "super(RemoveLayer, 
self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text) class", "node if it doesn't exist on the target layer target_node", "= new_pos prev_pos = self.old_positions[node_path] # Only letting it set", "= model.stage self.layer_path = layer_path self.prev_values = {} @processing def", "= layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path,", "top layer the top layer store an overrides. :param color:", "else: rm_layer_data = False for p in self.others[:]: self.others +=", "self).redo() self.setText(\"Localize compute on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path,", "1 self.model.update_comp_layer(rebuild=True) self.model.selection = list(self.node_path_data.values()) if len(self.node_paths) == 1: path_str", "= nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value", "StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color = color", "reference to {}\".format(layer_path) @processing def undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing", "in self.model.effected_layers if layer_saved: eff_by_undo = True # Set redo", "# Layer was saved and then undo was called, thus", "layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path self.setText('Instanced", "= data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data,", "file_name self.chdir = chdir @processing def undo(self): new_layer = self.model.lookup_layer(self.new_layer_path)", "# get node info parent = getattr(node, 
INTERNAL_ATTRS.PARENT_PATH) name =", "{}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on a node\"\"\" def", "= self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for", "# get node selection for undo self.prev_selection = self.model.selection #", "{}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self, node_path, model,", "layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path to {}\".format(self.node_path))", "child_order_tuple ancestor = layer.lookup(ancestor_path) if ancestor: self.stage.set_node_child_order(ancestor, child_order, layer) if", "= node_positions self.old_positions = {} for path in self.new_positions.keys(): self.old_positions[path]", "Set redo to False since now its been saved &", "node_path, model, target_layer_path) def redo(self): node_path = self.data.get(META_ATTRS.VALUE) layer =", "True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr", "was deleted new_nodes, dirty = self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer,", "is not None: self.node_data['attr_display'] = attr_display # get layer data", "= data self.parent_path = parent_path self.layer_path = layer_path self.stage =", "dirty_nodes = [] # delete any created nodes for node_path", "descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node in new:", "a bug? 
We don't touch the top layer in redo...", "node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any", "lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path))", "self.node_paths = node_paths self.model = model self.stage = model.stage self.prev_selection", "undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color =", "= model.stage # command data self.pos = pos or [0.0,", "# process new nodes for new_node in new: # add", "node_path, all_data in self.prev_node_data.items(): apply_data = {} node = self.model.target_layer.lookup(node_path)", "file_name, idx, model, chdir): super(NewLayer, self).__init__(model) self.new_layer_path = None self.model", "comp layer. It is important to note that the other_removed_nodes", "to the list and emit model signal new_node_path = target_layer.get_node_path(new_node)", "remove attr here to insure attr is deleted self.remove_attr =", "= True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {}", "self).__init__() self.new_paths = paths self.model = model self.prev_paths = self.model.selection", "layer_path) def undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path", "prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if", "if ancestors: ancestor = ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order =", "nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set", 
"layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def redo(self): super(SetNodeInstance,", "layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not", "nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger) from nxt import nxt_io", "self.created_node_paths = [] self.node_path = node_path def undo(self): layer =", "self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n,", "layer_data = nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer, \"filepath\": self.file_path, \"real_path\":", "attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self,", "it does not exist if not self.stage.node_attr_exists(node, self.attr_name): self.remove_attr =", "nodes += [node] # get current node hierarchy information for", "@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path)", "self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this graph\"\"\" def __init__(self,", "layer_path): node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if", "layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from {}\".format(self.attr_name, self.node_path))", "of node path :param model: StageModel :param layer_path: String of", "else: ancestors = layer.ancestors(node_path) if ancestors: ancestor = ancestors[0] ancestor_path", "the layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a", "resulting nodes self.node_path_data = {} self.new_node_paths = [] 
self.created_node_paths =", "in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path", "layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes for new_node in", "dirty = stage.add_node_hierarchy(node_hierarchy, parent=None, layer=layer, comp_layer=comp_layer) for nn_p, n in", "pos[0] - prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str =", "Internal from nxt_editor import colors from nxt_editor import user_dir from", "source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage # get undo", "super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(LocalizeCompute, self).redo()", "super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me = {} def", "will be deleted in this event loop. \"\"\" super(DeleteNode, self).__init__(model)", "class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\" def __init__(self, node_path, attr_name, comment,", "pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1:", "= model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text = \"Removed", "if attr_display is not None: self.node_data['attr_display'] = attr_display # get", "an attribute to a node.\"\"\" def __init__(self, node_path, attr_name, value,", "the target layer target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes,", "and the same # layer was saved again. 
eff_by_redo =", "node_path: pos = self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1]", "20, pos[1] + 20] self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection", "class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing layer\"\"\" def __init__(self, layer_path,", "layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path}", "\"\"\" super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color = color self.old_color", "child_order, layer) if new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path)", "node_path in self.node_paths: node = layer.lookup(node_path) name = getattr(node, INTERNAL_ATTRS.NAME)", "_, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes +=", "just be a # setattr. 
The way it works now", "= node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order = child_order_tuple ancestor =", "self).__init__(model) self.layer_path = layer_path self.color = color self.old_color = ''", "= prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path)", "= self.model.lookup_layer(self.layer_path) comp_layer = self.model.comp_layer parent = self.node_data['parent'] # We", "+= 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data", "self).__init__(node_path, model, layer_path, others) self.rebuild = False # Tells the", "Get the data to be set if undo is called", "will be mutated by the stage as it deletes node,", "- 1)) if cur_cmd is self: return if layer_just_saved in", "for parent_path, nodes_dict in common_parent_nodes.items(): self.stage.parent_nodes(nodes=list(nodes_dict.keys()), parent_path=parent_path, layer=layer) for parent_path,", "self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand):", "# get undo data self.prev_selection = self.model.selection # resulting nodes", "fix names because we know this node should be #", "= [] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete", "# redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name, value,", "def redo(self): new_selection = [] self.new_node_paths = [] source_layer =", "= model.stage # get undo data self.prev_selection = self.model.selection self.prev_starts", "layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name 
self.attr_name", "attr_display is not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple", "y_delta) if len(self.new_positions) == 1: nodes_str = node_path else: nodes_str", "# command data self.pos = pos or [0.0, 0.0] self.prev_selection", "that the other_removed_nodes list must be shared by other DeleteNode", "= self.node_paths[0] else: path_str = str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str,", "self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths)", "the node as an empty node new_nodes, new_paths, dirty =", "import time # External from Qt.QtWidgets import QUndoCommand # Internal", "layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor = closest_ancestor[0] else: closest_ancestor = None", "= self.model.comp_layer self.remove_attr = False self.created_node_paths = [] # get", "self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {}", "also adds the attribute if it does not exist if", "not node_hierarchy_data: return # parent self.node_path_data = self.stage.parent_nodes(nodes, self.parent_node_path, layer)", "self.model = model self.stage = model.stage self.insert_idx = idx self.file_path", ":param node_path: String of node path :param model: StageModel :param", "= self.model.get_start_nodes(layer) self.prev_breaks = user_dir.breakpoints dirty_nodes = [] node =", "to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to a", "= node_paths self.value = value self.model = model self.stage =", "pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path] = pos # This", "= [self.node_path] if 
self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path,", "model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self):", "prev_value self.model.comp_layer.collapse[node_path] = prev_value self.model.collapse_changed.emit(list(self.prev_values.keys())) @processing def redo(self): layer =", "SetLayerAlias(NxtCommand): \"\"\"Set Layer Alias\"\"\" def __init__(self, alias, layer_path, model): super(SetLayerAlias,", "@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer:", "same # layer was saved again. eff_by_redo = True eff_by_undo", "= model.comp_layer new_node_paths = [] new_nodes = [] node_hierarchy =", "data = all_data['data'] child_order = all_data['data'].get('child_order', []) apply_data['child_order'] = child_order", "self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias)", "except ValueError: continue super(RemoveFromSelection, self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo()", "remove_layer_data=False) n = layer.lookup(self.node_path) if n is not None: if", "= model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name = model.stage.get_unique_node_name(name=name, layer=layer, parent_path=parent_path,", "self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node", "= apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance']", "= 
node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name:", "= False first_eff_by_redo = False try: first_eff_by_undo = self._layers_effected_by_me[layer_path]['undo'] except", "layers. If the layer is not marked as effected in", "__init__(self, node_path, code_lines, model, layer_path): super(SetCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model,", "layer_path self.alias = alias self.old_alias = '' self.model = model", "for node_path in self.node_paths: node = layer.lookup(node_path) top_node = self.stage.get_top_node(node,", "{} self.new_node_paths = [] self.created_node_paths = [] # get node", "val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path, attr_name,", "single command can effect layers in different ways. :param layer_path:", "undo(self): self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path = self.old_node_path", "value, model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name,", "{}\".format(path_str)) else: self.setText(\"Remove breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear all", "color, layer_path, model): \"\"\"Sets the color for a given layer,", "can be used to # set multiple attr's data. That", "self.value = value self.model = model self.stage = model.stage self.layer_path", "layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) @processing def", "= layer.lookup(node_path) if node is not None: self.stage.delete_node(node, layer) idx", "new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths", "This might be a bug? 
We don't touch the top", "works now we can only set one attr's data at", "# resulting node self.node_path = None self.created_node_paths = [] @processing", "layer_path) def redo(self): super(RevertCompute, self).redo() self.setText(\"Revert compute on {}\".format(self.node_path)) class", "layer=layer, comp_layer=comp, remove_layer_data=False) n = layer.lookup(self.node_path) if n is not", "- 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path)", "layer=layer, comp_layer=comp_layer) for nn_p, n in new_node_table: display_node = comp_layer.lookup(nn_p)", "def __init__(self, paths, model): self.rem_paths = paths new_selection = model.selection[:]", "layer.lookup(self.node_path) dirties = [self.node_path] if node is None: parent_path =", "self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path", "= node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if len(self.node_paths) ==", "layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove attr", "layer_path: string of layer real path :return: None \"\"\" eff_by_undo,", "marked as unsaved even if we undo an action after", "self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove", "model, layer_path) # Get the data to be set if", "= self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove attr here", "self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if 
layer is", "else: # Now the undo of this command effects the", "= self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node = source_layer.lookup(node_path)", "layer real path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved)", "self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path] =", "RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths = paths new_selection =", "state for a given layer with context to this command.", "= {} display_node = self.model.comp_layer.lookup(node_path) if not display_node: continue #", "layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path) # Get the", "self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path) if layer", "def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path to {}\".format(self.node_path)) class", "not the redo eff_by_redo = False eff_by_undo = True self._layers_effected_by_me[layer_just_saved]", "is None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name", "user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path,", "new_node in new: # add new node path to the", "be deleted in this event loop. 
\"\"\" super(DeleteNode, self).__init__(model) self.layer_path", "data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, model, layer_path) class LocalizeCompute(SetNodeAttributeValue):", "super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self): super(LocalizeExecPath, self).redo()", "= {} for old_path, node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent']", "undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path,", "0), layer) self.model._set_node_pos(self.new_path, new_pos, layer) super(InstanceNode, self).redo() self.return_value = self.new_path", "self.created_node_paths = [] nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path)", "self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer dirties = [self.node_path] # delete", "KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When the", "comp_layer = self.model.comp_layer parent = self.node_data['parent'] # We don't want", "[] nodes, dirty = self.stage.add_node(name=self.name, data=self.data, parent=self.parent_path, layer=layer.layer_idx(), comp_layer=self.model.comp_layer) dirty_nodes", "depended upon! 
:param node_path: String of node path :param model:", "layer = self.model.target_layer for node_path in self.node_paths: node_data = {}", "{}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str = node_path else:", "= self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1] + 20]", "this also adds the attribute if it does not exist", "= None self.prev_selection = model.selection @processing def undo(self): start =", "Alias\"\"\" def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path =", "comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes", "SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\" def __init__(self, node_path, attr_name, comment, model,", "for new_node in new: # add new node path to", "not delta_str: pos = new_pos prev_pos = self.old_positions[node_path] # Only", "INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment", "def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer = model.comp_layer", "in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(LAYERS.TOP) self.undo_effected_layer(self.new_layer_path) self.model.layer_removed.emit(self.new_layer_path) @processing def", "self._get_effects(layer_path) layer_saved = layer_path not in self.model.effected_layers if layer_saved: eff_by_undo", "start = time.time() created_node = False self.prev_selection = self.model.selection layer", "def redo(self): self.prev_node_data = {} self.node_path_data = {} self.new_node_paths =", "parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: Targeted parenting would 
avoid", "{}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def __init__(self, file_path,", "node_path, name, model, layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path)", "= True # Set redo to False since now its", "descendants for each top node so when # they are", "else: self.old_color = layer.get_color(fallback_to_local=False) layer.set_color_over(self.color) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color", "attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo() attr_path =", "attr_name, new_attr_name): node = layer.lookup(self.node_path) self.stage.rename_node_attr(node, attr_name, new_attr_name, layer) self.model.update_comp_layer()", "self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in self.node_paths: layer.collapse[node_path]", "self.old_alias = '' self.model = model self.stage = model.stage @processing", "nn_p, n in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is", "not None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes +=", "comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def", "if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str =", "def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias)", "state = not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle", "self).redo() attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) self.setText(\"Changed 
comment on {}\".format(attr_path)) class", "node\"\"\" def __init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name,", "in new_node_table: display_node = comp_layer.lookup(nn_p) if display_node is not None:", "on consistent delta. x_delta = pos[0] - prev_pos[0] y_delta =", "to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): super(RevertInstancePath,", "order\"\"\" def __init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER,", "_, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) #", "= self.model.comp_layer if node is not None: # delete node", "= layer.lookup(new_path) if prev_parent_path not in list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node:", "makes sure the layer is properly marked as unsaved even", "time and duplicate needs to get local + INTERNAL number", "delta_str = '{}, {}'.format(x_delta, y_delta) if len(self.new_positions) == 1: nodes_str", "{\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH: real_path, SAVE_KEY.COLOR: layer_color, SAVE_KEY.ALIAS: self.file_name", "selection'.format(self.rem_paths)) class LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self, node_paths, model): super(LocalizeNodes,", ":param model: StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path = layer_path self.color", "self).__init__(model) self.node_path = node_path self.nice_attr_name = attr_name self.attr_name = attr_name", "SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\" def __init__(self, paths, model): super(SetSelection,", "nodes for undo self.prev_node_data = {} @processing def undo(self): layer", "display_node, self.model.comp_layer) 
self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection", "if source_layer.layer_idx() > 0: rm_layer_data = True else: rm_layer_data =", "= nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path,", "# path is placed in a list of descendants for", "macro. The list will be mutated by the stage as", "if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing", "self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs) attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val", "attr here to insure attr is deleted self.remove_attr = True", "# localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] =", "display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order =", "else: func = self.model._remove_breakpoint for node_path in self.node_paths: func(node_path, layer)", "data attr_display = self.model.get_attr_display_state(self.node_path) if attr_display is not None: self.node_data['attr_display']", "because we know this node should be # named what", "node_data in self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path", "INTERNAL_ATTRS.CHILD_ORDER, new_child_order) new_node_paths += [nn_p] new_nodes += [n] return new_nodes,", "self.undo_effected_layer(layer.real_path) @processing def redo(self): 
layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name)", "self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result = self.stage.node_setattr_data(node=n, attr=self.attr_name,", "prev_pos[0] y_delta = pos[1] - prev_pos[1] delta_str = '{}, {}'.format(x_delta,", "{} attr to {}\".format(self.attr_name, self.node_path)) class DeleteAttribute(AddAttribute): \"\"\"Delete attribute on", "instance_path, model, layer_path): super(SetNodeInstance, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, instance_path, model, layer_path) def", "in a command macro. The list will be mutated by", "the node collapse state\"\"\" def __init__(self, node_paths, value, model, layer_path):", "def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path", "nodes_str = self.node_paths[0] else: nodes_str = 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class", "if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer)", "common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple: ancestor_path, child_order", "self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed", "External from Qt.QtWidgets import QUndoCommand # Internal from nxt_editor import", "node top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list +=", "self.insert_idx = idx self.file_path = file_path self.real_path = nxt_path.full_file_expand(self.file_path, chdir)", "super(NewLayer, self).__init__(model) self.new_layer_path = None 
self.model = model self.stage =", "model.stage # get undo data self.prev_selection = self.model.selection # resulting", "[self.model.target_layer] for node_path, all_data in self.prev_node_data.items(): apply_data = {} node", "= attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection", "dirty = _add_node_hierarchy(node_path, self.model, layer) target_node = new_nodes[-1] self.created_node_paths +=", "\"\"\"Set node child order\"\"\" def __init__(self, node_path, child_order, model, layer_path):", "= parent_path self.layer_path = layer_path self.stage = model.stage # command", "= value self.model = model self.stage = model.stage self.layer_path =", "self.prev_selection @processing def redo(self): self.prev_node_data = {} self.node_path_data = {}", "= self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor =", "self.layer_paths.append(layer.real_path) else: state = not layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True)", "else: self.old_alias = layer.get_alias(fallback_to_local=False) layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias", "created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if", "__init__(self, model): super(NxtCommand, self).__init__() self.model = model self.model.layer_saved.connect(self.reset_layer_effected) self._layers_effected_by_me =", "layer) # restore layer data pos = self.node_data.get('pos') if pos:", "attr from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert compute\"\"\" def __init__(self,", 
"self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: state = not layer.get_soloed(local=True) layer.set_soloed(state)", "True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path):", "= nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer)", "node_data = self.prev_node_data[old_path] # restore name prev_name = node_data['name'] name", "= child_order apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled']", "= [] # get the node node = layer.lookup(self.node_path) dirties", "= self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias) self.undo_effected_layer(self.model.top_layer.real_path)", "attribute value this also adds the attribute if it does", "self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data = nxt_io.load_file_data(self.real_path) extra_data =", "= self.model.lookup_layer(self.layer_path) new_pos = self.model.get_pos_offset(node_path, (GRID_SIZE * 16, 0), layer)", "# set position for un-parent if self.parent_node_path == nxt_path.WORLD: old_root", "parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL:", "input for {}\".format(self.node_path)) return self.setText(\"Set {} exec input to {}\".format(self.node_path,", "= self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = []", "= nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH: self.file_path, SAVE_KEY.REAL_PATH:", "= {'undo': eff_by_undo, 'redo': eff_by_redo} class 
AddNode(NxtCommand): \"\"\"Add a node", "model, layer_path, other_removed_nodes): \"\"\"Delete node from the layer at the", "muting an existing layer\"\"\" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model)", "nodes\"\"\" def __init__(self, node_path, model, layer_path, others): super(RevertNode, self).__init__(node_path, model,", "if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True) else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self):", "model.lookup_layer(layer_path).layer_idx() super(RemoveLayer, self).__init__(layer_path, idx, model, None) self.text = \"Removed reference", "new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert {}'.format(self.node_path)) class ParentNodes(NxtCommand): \"\"\"Parent", "@processing def undo(self): layer = self.model.target_layer self.undo_effected_layer(layer.real_path) # undo parent", "self.undo_effected_layer(target_layer.real_path) @processing def redo(self): new_selection = [] self.new_node_paths = []", "= source_layer_path self.target_layer_path = target_layer_path self.stage = model.stage # get", "= pos # This might be a bug? We don't", "self.model.selection = [self.node_path] self.redo_effected_layer(layer.real_path) self.setText('Added node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def", "layer_path): super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path = layer_path self.new_positions", "node hierarchy information for each node. 
each node # path", "= self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty +", "dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme:", "node, old_path in nodes_dict.items(): node_data = self.prev_node_data[old_path] # restore name", "self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer =", "first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When the model marks a", "marks a layer as saved we reset the class attr", "undo of this command effects the layer not the redo", "as such by setting the class attr `_first_effected_by_redo` to True.", "data['instance'] self.stage.transfer_node_data(node, self.model.target_layer, apply_data, self.model.comp_layer) local_attrs = self.stage.get_node_local_attr_names(node_path, layers) for", "[] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self):", "+ cmd.text() + \" | \" + update_time + \"ms\")", "display_node = comp_layer.lookup(node_path) code_lines = model.stage.get_node_code_lines(display_node, comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE,", "layer as saved we reset the class attr `_first_effected_by_redo` to", "the class attr `_first_effected_by_redo` to False. This makes sure the", "at the layer path and the comp layer. It is", "start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) comp = self.model.comp_layer", "a # setattr. 
The way it works now we can", "model.stage self.prev_selection = self.model.selection self.prev_node_data = {} self.created_node_paths = []", "@processing def redo(self): self.prev_breaks = user_dir.breakpoints.get(self.layer_path, []) if self.layer_path in", "self.model.processing.emit(True) func(self) self.model.processing.emit(False) return wrapper class NxtCommand(QUndoCommand): def __init__(self, model):", "not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection", "nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if", "undo parent common_parent_nodes = {} for old_path, node_data in self.prev_node_data.items():", "cur_cmd is self: return if layer_just_saved in self._layers_effected_by_me: if eff_by_undo:", "class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths = paths new_selection", "node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer, expand=False) super(LocalizeInstancePath, self).__init__(node_path,", "def redo(self): super(SetCompute, self).redo() self.setText(\"Changed compute on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue):", "= False else: # Now the undo of this command", "chdir) @processing def undo(self): new_layer = self.model.lookup_layer(self.real_path) if new_layer in", "self.return_value = self.stage.node_setattr_data(node, self.attr_name, layer=layer, create=True, comp_layer=comp, **self.data) if self.attr_name", "paths that will be deleted in this event loop. 
\"\"\"", "comp = self.model.comp_layer self.remove_attr = False self.created_node_paths = [] #", "INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr", "self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to {}\".format(attr_path, val)) #", "empty nodes for node_path in self.created_node_paths: n = layer.lookup(node_path) if", "= self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node = layer.lookup(node_path)", "any created nodes for node_path in self.created_node_paths: n = layer.lookup(node_path)", "if self.remove_attr: self.stage.delete_node_attr(n, self.attr_name) dirties += comp.get_node_dirties(self.node_path) else: result =", "position for un-parent if self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path)", "path on \" \"{} to {}\".format(self.node_path, self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue):", "layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if not self.value:", "self.model.layer_alias_changed.emit(self.layer_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is", "self.parent_node = None self.model = model self.stage = model.stage self.node_paths", "= layer_color_index[0] for c in layer_color_index: if c not in", "elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed by a", "self.setText(\"Clear all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as the", "color = color.lower() open_layer_colors += [color] layer_color = layer_color_index[0] for", "# restore layer data pos = self.node_data.get('pos') if pos: self.model.top_layer.positions[self.node_path]", "since now its been saved & the undo effects it", "real 
path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved", "class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes): \"\"\"Delete node", "then undo was called, thus this redo has a #", "= self.data.get(META_ATTRS.VALUE) if val is None: self.setText(\"Removed exec input for", "node_path, model, layer_path, other_removed_nodes): \"\"\"Delete node from the layer at", "redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths = [] dirty_nodes = []", "paths, model): self.added_paths = paths curr_selection = model.selection new_paths =", "layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color = layer.get_color(local=True)", "node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1:", "layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path)", "copy.deepcopy(self.prev_data) # set attribute value this also adds the attribute", "node_path, new_pos in self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str:", "getattr(node, INTERNAL_ATTRS.NAME) if name != prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True)", "source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data = True", "data for all child nodes for undo self.prev_node_data = {}", "eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at", "get local + INTERNAL number of attrs. 
super(DuplicateNodes, self).__init__(model) self.node_paths", "+= dirty + [self.node_path] if self.node_path in self.model.selection: fix_selection =", "the other_removed_nodes list must be shared by other DeleteNode commands", "layer\"\"\" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path", "case happens when undo is called after a layer is", "+= dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection", "# Fixme: Targeted parenting would avoid the need for a", "= {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds layer", "self.model.comp_layer) self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection = self.prev_selection if", "== 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) self.setText(\"Parent", "redo(self): layer = self.model.lookup_layer(self.layer_path) if self.value: func = self.model._add_breakpoint else:", "self.layer_path = layer_path self.stage = model.stage # get undo data", "data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: Targeted parenting would", "data is_start = self.model.get_is_node_start(self.node_path, layer) self.node_data['start'] = is_start self.node_data['save_dict'] =", "= [] if parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors", "of descendants for each top node so when # they", "super(SetAttributeComment, self).__init__(node_path, attr_name, data, model, layer_path) def redo(self): super(SetAttributeComment, self).redo()", "= self.value self.model.collapse_changed.emit(list(self.prev_values.keys())) if len(self.node_paths) == 1: path_str = 
self.node_paths[0]", "it set text once, relying on consistent delta. x_delta =", "self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model, layer_path) def redo(self): self.setText(\"Revert exec input", "= model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path,", "= model self.stage = model.stage self.layer_path = layer_path @processing def", "model we mark it as effected. This case happens when", "This makes sure the layer is properly marked as unsaved", "Remove our created empty nodes for node_path in self.created_node_paths: n", "attr_name, data, model, layer_path) def redo(self): super(AddAttribute, self).redo() self.remove_attr =", "comment, model, layer_path): data = {META_ATTRS.as_save_key(META_ATTRS.COMMENT): comment} super(SetAttributeComment, self).__init__(node_path, attr_name,", "\" + cmd.text() + \" | \" + update_time +", "[] self.created_node_paths = [] # get node selection for undo", "{'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self, layer_path): \"\"\"Adds layer to", "= model self.stage = model.stage self.insert_idx = idx self.file_path =", "self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty node = layer.lookup(self.node_path) source_layer", "self.attr_name _, dirties = self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False)", "@processing def redo(self): delta_str = None layer = self.model.lookup_layer(self.layer_path) for", "= model self.stage = model.stage self.prev_selection = self.model.selection self.prev_node_data =", "deleted self.remove_attr = True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path)", "self.model.lookup_layer(self.real_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) 
self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path)", "**self.prev_data) if self.attr_name == INTERNAL_ATTRS.INSTANCE_PATH: dirties += result if self.attr_name", "it deletes node, this behavior is depended upon! :param node_path:", "def __init__(self, node_path, attr_name, model, layer_path): node = model.comp_layer.lookup(node_path) data", "top_node_descendant_list if not node_hierarchy_data: return # parent self.node_path_data = self.stage.parent_nodes(nodes,", "if self.parent_node_path == nxt_path.WORLD: old_root = nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root,", "node_path in self.node_paths: layer.collapse[node_path] = self.value self.model.comp_layer.collapse[node_path] = self.value self.model.collapse_changed.emit(list(self.prev_values.keys()))", "2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer", "self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\" def __init__(self, node_path,", "model, layer_path): comp_layer = model.comp_layer display_node = comp_layer.lookup(node_path) code_lines =", "= self.prev_selection @processing def redo(self): self.prev_node_data = {} self.node_path_data =", "node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self): layer", "dirties += result if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path)", "= True self._layers_effected_by_me[layer_just_saved] = {'undo': eff_by_undo, 'redo': eff_by_redo} def redo_effected_layer(self,", "to the model's set of effected (unsaved) layers. 
If this", "= getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) # Attr display", "layer target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty", "True # Set redo to False since now its been", "if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer data pos =", "self.model.node_has_parent(new_node_path, target_layer) if not has_parent and new_node_path != node_path: pos", "= self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.prev_values = {} for np in self.node_paths:", "= model.selection[:] for path in paths: try: new_selection.remove(path) except ValueError:", "not None: # delete node _, dirty = self.stage.delete_node(node, layer,", "model, layer_path): super(RenameAttribute, self).__init__(model) self.node_path = node_path self.attr_name = attr_name", "self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from {}\".format(self.attr_name, self.node_path)) class RevertCompute(SetNodeAttributeValue): \"\"\"Revert", "layer_color_index: if c not in open_layer_colors: layer_color = c break", "to be set if undo is called layer = self.model.lookup_layer(self.layer_path)", "self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(SetCompute, self).redo() self.setText(\"Changed", "layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.set_alias(self.old_alias) else: layer.set_alias_over(self.old_alias)", "name, model, layer_path): self.old_node_path = node_path layer = model.lookup_layer(layer_path) parent_path", "undo effects it eff_by_redo = False self.model.effected_layers.add(layer_path) elif eff_by_redo: try:", "this command and the same # layer was saved again.", "data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name, data, 
model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize", "class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as the execution start point\"\"\"", "\"\"\"Set Layer Alias\"\"\" def __init__(self, alias, layer_path, model): super(SetLayerAlias, self).__init__(model)", "on {}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): inst_path", "None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd", "self.model = model self.layer_path = layer_path self.new_positions = node_positions self.old_positions", "path of layer :param model: StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path", "layer the top layer store an overrides. :param color: string", "if not delta_str: pos = new_pos prev_pos = self.old_positions[node_path] #", "SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def __init__(self, node_path, comment, model, layer_path):", "way duplicate can just be a # setattr. 
The way", "= () for dirty in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name)", "an existing layer\"\"\" def __init__(self, layer_path, model): super(MuteToggleLayer, self).__init__(model) self.layer_path", "has a # net zero effect on the layer try:", "self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor =", "def __init__(self, node_path, model, layer_path): super(RevertExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, None, model,", "each node # path is placed in a list of", "# undo_debug(self, start) @processing def redo(self): start = time.time() created_node", "super(DeleteAttribute, self).redo() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) def redo(self): # Overload", "self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0", "redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer = self.model.lookup_layer(self.layer_path) #", "the undo of this command effects the layer not the", "{}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path,", "= False for p in self.others[:]: self.others += comp_layer.get_node_dirties(p) _,", "= self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path) for node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] =", "node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set attribute value\"\"\" def __init__(self, node_path,", "not None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order')", "child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) else: ancestors 
= layer.ancestors(node_path) if ancestors:", "model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\" def __init__(self,", "break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\": parent_layer, SAVE_KEY.FILEPATH:", "redo(self): self.setText(\"Revert exec input on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\"", "self.stage.add_node(name=self.node_data['name'], data=self.node_data['save_dict'], parent=parent, layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer)", "= model.stage self.node_paths = node_paths # resulting nodes self.node_path_data =", "# undo parent common_parent_nodes = {} for old_path, node_data in", "layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change child order on {}\".format(self.node_path))", "nxt_editor logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self)", "queue # was moved to an index before this command", "self.rem_paths = paths new_selection = model.selection[:] for path in paths:", "if len(self.node_paths) == 1: nodes_str = self.node_paths[0] else: nodes_str =", "node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name, model.comp_layer) if META_ATTRS.SOURCE", "comment on {}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\" def", "comp_layer=self.model.comp_layer) dirty_nodes += dirty self.node_path = layer.get_node_path(nodes[0]) self.model._set_node_pos(node_path=self.node_path, pos=self.pos, layer=layer)", "created empty nodes for node_path in self.created_node_paths: n = layer.lookup(node_path)", "redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change child order on {}\".format(self.node_path)) 
class SetLayerAlias(NxtCommand):", "= self.model.selection # get previous node data for all child", "self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items(): self.model._set_node_pos(node_path=node_path, pos=old_pos, layer=layer) self.undo_effected_layer(self.layer_path)", "# Fixme: The next 2 lines each build once self.model.update_comp_layer(rebuild=True)", "= self.model.comp_layer dirties = [self.node_path] # delete any created nodes", "\"\"\"Removes layer from the model's set of effected (unsaved) layers.", "the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set !=", "already been marked as undo effects the # layer, meaning", "old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple = node_data.get(INTERNAL_ATTRS.CHILD_ORDER) if child_order_tuple:", "self.model._set_attr_display_state(old_node_path, attr_state) idx += 1 self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection @processing", "self).__init__(model) self.node_paths = node_paths self.value = value self.model = model", "self.model.update_comp_layer() old_name = nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename", "layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\" def __init__(self, node_path,", "self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder, self).redo() self.setText(\"Change", "as effected in the model we mark it as effected.", "\"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path): comp_layer = model.comp_layer", "self.layer_path = layer_path @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer,", "the undo effects it eff_by_redo = False 
self.model.effected_layers.add(layer_path) elif eff_by_redo:", "super(ClearBreakpoints, self).__init__() self.model = model self.layer_path = layer_path self.prev_breaks =", "self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer): \"\"\"Remove existing", "__init__(self, paths, model): self.rem_paths = paths new_selection = model.selection[:] for", "if attr not in attrs_to_keep: self.stage.delete_node_attr(node=node, attr_name=attr) self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(layers[0].real_path) self.model.selection", "this graph\"\"\" def __init__(self, node_paths, descendants, model, source_layer_path, target_layer_path): #", "target_node = self.model.target_layer.lookup(node_path) if not target_node: new_nodes, new_paths, dirty =", "exec_source, model, layer_path) def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE)", "self).redo() self.setText(\"Revert instance path on {}\".format(self.node_path)) class LocalizeExecPath(SetNodeAttributeValue): def __init__(self,", "new_selection = [] self.new_node_paths = [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer", "attr `_first_effected_by_redo` to True. 
:param layer_path: string of layer real", "logger = logging.getLogger(nxt_editor.LOGGER_NAME) def processing(func): def wrapper(self): self.model.processing.emit(True) func(self) self.model.processing.emit(False)", "= self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove {} attr from {}\".format(self.attr_name, self.node_path)) class", "dirty = self.stage.duplicate_node(node=node, layer=target_layer, descendants=self.descendants) new_selection.append(target_layer.get_node_path(new[0])) # process new nodes", "attr_path = nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to", "we reset the class attr `_first_effected_by_redo` to False. This makes", "alias to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\" def", "ancestor_path = layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path, child_order]", "undo(self): layer = self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint", "redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias =", "is properly marked as unsaved even if we undo an", "insure attr is deleted self.remove_attr = True super(DeleteAttribute, self).undo() layer", "self.new_positions.items(): self.model._set_node_pos(node_path=node_path, pos=new_pos, layer=layer) if not delta_str: pos = new_pos", "model, layer_path) def redo(self): self.setText(\"Revert exec input on {}\".format(self.node_path)) class", "comment\"\"\" def __init__(self, node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT,", "model): self.added_paths = paths curr_selection = model.selection new_paths = curr_selection", "else: self.setText(\"Expanded {}\".format(path_str)) class 
SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\" def", "[] for layer in self.stage._sub_layers: color = layer.color if color:", "self.layer_path = layer_path self.alias = alias self.old_alias = '' self.model", "model, layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute on {}\".format(self.node_path))", "super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input on {}\".format(self.node_path)) class RevertExecPath(SetNodeAttributeValue): def", "False self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed", "\"\"\"Refernce existing layer\"\"\" def __init__(self, file_path, idx, model, chdir): super(ReferenceLayer,", "set position has_parent = self.model.node_has_parent(new_node_path, target_layer) if not has_parent and", "= nxt_path.make_attr_path(self.node_path, attr_name) new_name = nxt_path.make_attr_path(self.node_path, new_attr_name) self.setText(\"Rename {} to", "{META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data, model, layer_path) class RenameNode(SetNodeAttributeValue):", "new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str = self.node_paths[0] else:", "nxt_path.get_root_path(old_node_path) new_pos = self.model.get_pos_offset(old_root, (GRID_SIZE * 14, GRID_SIZE), self.model.top_layer) self.model._set_node_pos(new_node_path,", "{} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection", "self.model.lookup_layer(self.layer_path) if not self.value: func = self.model._add_breakpoint else: func =", "node # path is placed in a list of descendants", "node_path, comment, model, layer_path): super(SetNodeComment, self).__init__(node_path, INTERNAL_ATTRS.COMMENT, comment, model, layer_path)", "self.model._remove_breakpoint for node_path in self.node_paths: 
func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths)", "layer_path) def redo(self): super(SetNodeComment, self).redo() self.setText(\"Changed comment on {}\".format(self.node_path)) class", "{}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model, layer): stage = model.stage comp_layer", "@processing def undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path) self.undo_effected_layer(layer.real_path)", "state = not layer.get_muted(local=True) layer.set_muted(state) self.layer_paths.append(layer.real_path) else: state = not", "+ update_time + \"ms\") def redo_debug(cmd, start): update_time = str(int(round((time.time()", "super(AddNode, self).__init__(model) self.name = name self.data = data self.parent_path =", "self.attr_name) changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr", "if self.attr_name in INTERNAL_ATTRS.ALL: dirties += comp.get_node_dirties(self.node_path) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp)", "self.node_data = {} self.others = other_removed_nodes @processing def undo(self): layer", "else: self.model.nodes_changed.emit(dirty_set) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer =", "self).__init__(model) self.node_paths = node_paths self.model = model self.stage = model.stage", "if attr_state is not None: self.model._set_attr_display_state(new_node_path, attr_state) # set position", "c not in open_layer_colors: layer_color = c break real_path =", "layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._add_start_node(self.node_path, layer) # restore layer", "self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "= INTERNAL_ATTRS.as_save_key(self.attr_name) 
attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS:", "to True. :param layer_path: string of layer real path :return:", "the model we mark it as effected. This case happens", "node_paths # resulting nodes self.node_path_data = {} self.new_node_paths = []", "undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks self.model.nodes_changed.emit(tuple(self.prev_breaks)) @processing def redo(self): self.prev_breaks =", "delta_str = None layer = self.model.lookup_layer(self.layer_path) for node_path, new_pos in", "= self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_data", "apply_data['attributes'] = data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled')", "def __init__(self, node_positions, model, layer_path): super(SetNodesPosition, self).__init__(model) self.model = model", "self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data,", "is_start self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer)", "= c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data = {\"parent_layer\":", "self.created_node_paths: n = self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n,", "{}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set node execute sources\"\"\" def __init__(self, node_path,", "len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths)", "self.node_data['attr_display'] = attr_display # get layer data is_start = self.model.get_is_node_start(self.node_path,", "self.others = other_removed_nodes 
@processing def undo(self): layer = self.model.lookup_layer(self.layer_path) comp_layer", "self.setText(\"Removed exec input for {}\".format(self.node_path)) return self.setText(\"Set {} exec input", "None self.created_node_paths = [] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "another base command class that can be used to #", "self.model.about_to_rename.emit() self.prev_data['force'] = True super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection", "None: parent_path = nxt_path.get_parent_path(self.node_path) name = nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in", "attr_name self.new_attr_name = new_attr_name self.model = model self.stage = model.stage", "= 'nodes' self.setText('Duplicated {}'.format(nodes_str)) class InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this", "{}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, attr_name, model,", "n = self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=self.model.target_layer,", "QUndoCommand # Internal from nxt_editor import colors from nxt_editor import", "= new_layer.real_path self.redo_effected_layer(new_layer.real_path) # Fixme: The next 2 lines each", "try: first_eff_by_redo = self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo", "node as an empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path,", "layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx() > 0: rm_layer_data =", "= {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value", "if n is not None: self.stage.delete_node(n, layer=layer, comp_layer=comp, remove_layer_data=False) n", 
"self.model.layer_color_changed.emit(self.layer_path) self.setText(\"Set {} color to {}\".format(layer.filepath, self.color)) def _add_node_hierarchy(base_node_path, model,", "super(SetNodesPosition, self).__init__(model) self.model = model self.layer_path = layer_path self.new_positions =", "layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model, layer_path) def redo(self): super(SetNodeChildOrder,", "= layer.lookup(self.node_path) self.data = self.stage.get_node_attr_data(node, self.attr_name, layer) def undo(self): super(DeleteAttribute,", "layer=layer) self.undo_effected_layer(self.layer_path) @processing def redo(self): delta_str = None layer =", "def redo(self): start = time.time() created_node = False self.prev_selection =", "self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name):", "{} alias to {}\".format(layer.filepath, self.alias)) class NewLayer(NxtCommand): \"\"\"Add new layer\"\"\"", "expand=False) super(LocalizeInstancePath, self).__init__(node_path, INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath,", "model): super(LocalizeNodes, self).__init__(model) self.node_paths = node_paths self.model = model self.stage", "in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection @processing def", "ancestors = layer.ancestors(node_path) if ancestors: ancestor = ancestors[0] ancestor_path =", "super(RenameNode, self).undo() self.node_path = self.old_node_path self.model.selection = [self.node_path] def redo(self):", "localize source node self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] = node_data", "layer, remove_layer_data=False) dirty_nodes += dirty node = 
layer.lookup(self.node_path) source_layer =", "user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if ancestor_tuple: ancestor_path, ancestor_child_order", "{}\".format(self.node_path)) class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self, node_path, attr_name, new_attr_name,", "child_order] self.prev_node_data[node_path] = node_data nodes += [node] # get current", "layer_path): \"\"\"Removes layer from the model's set of effected (unsaved)", "def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) # delete duplicated nodes for", "node = layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if top_node is", "apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] = data['instance'] self.stage.transfer_node_data(node,", "list(common_parent_nodes.keys()): common_parent_nodes[prev_parent_path] = {node: old_path} else: common_parent_nodes[prev_parent_path][node] = old_path child_order_tuple", "self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for node_path in self.node_paths: node", "not eff_by_undo and layer_unsaved: return if not eff_by_undo: self._layers_effected_by_me[layer_path] =", "command data self.pos = pos or [0.0, 0.0] self.prev_selection =", "path :return: None \"\"\" layer_unsaved = layer_path in self.model.effected_layers eff_by_undo,", "the model's set of effected (unsaved) layers. 
If the layer", "import user_dir from nxt import nxt_path from nxt.nxt_layer import LAYERS,", "self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node = layer.lookup(node_path) name", "comp_layer) super(LocalizeCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, code_lines, model, layer_path) def redo(self): super(LocalizeCompute,", "effect on the layer try: self.model.effected_layers.remove(layer_path) except KeyError: # Removed", "node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path, INTERNAL_ATTRS.START_POINT, value, model, layer_path)", "state = not layer.get_soloed(local=True) layer.set_soloed(state) self.layer_paths.append(layer.real_path) else: state = not", "path :return: None \"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved =", "self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data = copy.deepcopy(self.prev_data) #", "self.attr_name, self.new_attr_name) self.redo_effected_layer(layer.real_path) def rename_attribute(self, layer, attr_name, new_attr_name): node =", "= self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData):", "c in layer_color_index: if c not in open_layer_colors: layer_color =", "as the execution start point\"\"\" def __init__(self, node_path, value, model,", "{} for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def", "= {} self.new_node_paths = [] self.created_node_paths = [] # get", "chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage = model.stage self.insert_idx", "self.data = data self.stage = model.stage self.layer_path = layer_path self.created_node_paths", "dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) if self.recomp:", 
"{}\".format(attr_path, val)) # redo_debug(self, start) class SetNodeAttributeValue(SetNodeAttributeData): def __init__(self, node_path,", "\"\"\"Add a node to the graph\"\"\" def __init__(self, name, data,", "model, layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec input on", "attrs. super(DuplicateNodes, self).__init__(model) self.node_paths = node_paths self.descendants = descendants self.source_layer_path", "to get local + INTERNAL number of attrs. super(DuplicateNodes, self).__init__(model)", "self._layers_effected_by_me: if eff_by_undo: # This command has already been marked", "name prev_name = node_data['name'] name = getattr(node, INTERNAL_ATTRS.NAME) if name", "= data self.stage = model.stage self.layer_path = layer_path self.created_node_paths =", "self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path =", "{}'.format(nodes_str, delta_str)) self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand): \"\"\"Select Nodes and Connections\"\"\" def", "in dirties: attr_path = nxt_path.make_attr_path(dirty, self.attr_name) changed_attrs += (attr_path,) self.model.attrs_changed.emit(changed_attrs)", "open_layer_colors = [] for layer in self.stage._sub_layers: color = layer.color", "model.selection @processing def undo(self): start = time.time() layer = self.model.lookup_layer(self.layer_path)", "= layer.lookup(self.node_path) if n is not None: if self.remove_attr: self.stage.delete_node_attr(n,", "def undo_effected_layer(self, layer_path): \"\"\"Removes layer from the model's set of", "1: nodes_str = node_path else: nodes_str = 'nodes' self.setText('Move {}", "created nodes for node_path in self.created_node_paths: node = layer.lookup(node_path) if", "parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None", "None layer_color_index = [str(k.name()) 
for k in colors.LAYER_COLORS] open_layer_colors =", "new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path not in", "None: self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing", "LocalizeNodes(NxtCommand): \"\"\"Localize nodes\"\"\" def __init__(self, node_paths, model): super(LocalizeNodes, self).__init__(model) self.node_paths", "all breakpoints\") class SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set this node as the execution", "ancestor = layer.lookup(ancestor_path) if ancestor: setattr(ancestor, INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection =", "self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node =", "(INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty in", "= self.old_positions[node_path] # Only letting it set text once, relying", "= self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent'] = parent_path parent_node =", "once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path)) class RemoveLayer(ReferenceLayer):", "self.recomp: changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection", "layer.lookup(node_path) if node is not None: self.stage.delete_node(node, layer) idx =", "in list(user_dir.breakpoints.keys()): user_dir.breakpoints.pop(self.layer_path) self.model.nodes_changed.emit(tuple(self.prev_breaks)) self.setText(\"Clear all breakpoints\") class 
SetNodeStartPoint(SetNodeAttributeValue): \"\"\"Set", "each top node so when # they are un-parented each", "new_node_paths, dirty def undo_debug(cmd, start): update_time = str(int(round((time.time() - start)", "name=prev_name, layer=layer, force=True) # restore position if self.parent_node_path != nxt_path.WORLD:", "= self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path) node_data['name'] = name node_data['parent']", "self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path) else: parent_layer = None layer_color_index =", "self).undo() self.setText(self.text) class MuteToggleLayer(NxtCommand): \"\"\"Toggles muting an existing layer\"\"\" def", "the layer at the layer path and the comp layer.", "self.remove_attr = True super(DeleteAttribute, self).undo() layer = self.model.lookup_layer(self.layer_path) self.redo_effected_layer(layer.real_path) self.setText(\"Remove", "self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection = self.prev_selection self.setText('Revert", "{} self.others = other_removed_nodes @processing def undo(self): layer = self.model.lookup_layer(self.layer_path)", "parent_layer = None layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS]", "{} to {}\".format(old_name, new_name)) class SetAttributeComment(SetNodeAttributeData): \"\"\"Set attribute comment\"\"\" def", "build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.real_path) self.model.layer_added.emit(self.real_path) self.setText(\"Added reference to {}\".format(self.real_path)) class", "set if undo is called layer = self.model.lookup_layer(self.layer_path) node =", "self.layer_paths = [] self.toggle_state() for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing", "self.model.selection # get previous node data for all child nodes", "= {} for np in 
self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer)", "node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData, self).__init__(model) self.node_path = node_path", "undo_effected_layer(self, layer_path): \"\"\"Removes layer from the model's set of effected", "self.prev_node_data = {} self.node_path_data = {} self.new_node_paths = [] self.created_node_paths", "[] nodes = [] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path", "= self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd", "self.prev_node_data.items(): prev_parent_path = node_data['parent'] prev_parent_node = layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path]", "node from the layer at the layer path and the", "INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer) node_data['pos'] = self.model.get_node_pos(node_path)", "for a given layer, if the layer is not a", "layer.set_alias_over(self.alias) self.redo_effected_layer(self.model.top_layer.real_path) self.model.layer_alias_changed.emit(self.layer_path) self.setText(\"Set {} alias to {}\".format(layer.filepath, self.alias)) class", "old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if", "next 2 lines each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New", "top_node_path = layer.get_node_path(top_node) top_node_descendant_list = node_hierarchy_data.get(top_node, []) top_node_descendant_list += [node]", "1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if self.value:", "for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self):", "get previous node data 
for all child nodes for undo", "self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection @processing", "layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer = self.model.lookup_layer(self.layer_path)", "== 1: path_str = self.node_paths[0] else: path_str = str(self.node_paths) if", "self.stage = model.stage self.layer_path = layer_path self.created_node_paths = [] self.remove_attr", "list_merger) from nxt import nxt_io from nxt import GRID_SIZE import", "in (INTERNAL_ATTRS.INSTANCE_PATH, INTERNAL_ATTRS.PARENT_PATH)): self.model.nodes_changed.emit(dirties) else: changed_attrs = () for dirty", "RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model, layer_path, others): super(RevertNode,", "[] self.node_path = node_path def undo(self): layer = self.model.lookup_layer(self.layer_path) #", "parent_path, nodes_dict in common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data", "{} if self.parent_node_path is nxt_path.WORLD: for node_path in self.node_paths: node", "self.new_node_paths = [] @processing def undo(self): target_layer = self.model.lookup_layer(self.target_layer_path) #", "= list(self.node_path_data.values()) idx = 0 for new_node_path in self.new_node_paths: old_node_path", "to a node.\"\"\" def __init__(self, node_path, attr_name, value, model, layer_path):", "INTERNAL_ATTRS.CHILD_ORDER) old_child_order = getattr(n, INTERNAL_ATTRS.CHILD_ORDER) new_child_order = list_merger(display_child_order, old_child_order) setattr(n,", "attr_name, model.comp_layer) if META_ATTRS.SOURCE in data: data.pop(META_ATTRS.SOURCE) super(LocalizeUserAttr, self).__init__(node_path, attr_name,", "layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path, others) self.rebuild = False", "state = not 
layer.get_muted(local=False) self.model.top_layer.set_mute_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle", "self.setText(\"New layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def __init__(self,", "self.undo_effected_layer(layers[0].real_path) self.model.selection = self.prev_selection @processing def redo(self): self.prev_node_data = {}", "layer_color = c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir) layer_data =", "__init__(self, file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model", "model): super(MuteToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths", "False since now its been saved & the undo effects", "# time and duplicate needs to get local + INTERNAL", "of layer realpath :param other_removed_nodes: list of node paths that", "@processing def redo(self): start = time.time() created_node = False self.prev_selection", "= layer_path self.prev_values = {} @processing def undo(self): layer =", "if self.value: self.setText(\"Collapsed {}\".format(path_str)) else: self.setText(\"Expanded {}\".format(path_str)) class SetNodeExecuteSources(SetNodeAttributeValue): \"\"\"Set", "self.recomp = True created_node = True self.created_node_paths += [self.node_path] node", "self.model._set_node_pos(new_node_path, pos, layer=target_layer) self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) ==", "\"\"\"Set attribute comment\"\"\" def __init__(self, node_path, attr_name, comment, model, layer_path):", "for the dirty nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set", "'pos': self.model.get_node_pos(self.node_path), 'break': is_break} closest_ancestor = 
layer.ancestors(self.node_path) if closest_ancestor: closest_ancestor", "self.setText(\"Add breakpoint to {}\".format(path_str)) else: self.setText(\"Remove breakpoint from {}\".format(path_str)) class", "saving it. :param layer_just_saved: string of layer real path :return:", "self.stage.delete_node(n, target_layer, remove_layer_data=True) self.model.selection = self.prev_selection self.model.update_comp_layer(rebuild=True) self.undo_effected_layer(target_layer.real_path) @processing def", "value, model, layer_path) class SetNodeChildOrder(SetNodeAttributeValue): \"\"\"Set node child order\"\"\" def", "[node] # get current node hierarchy information for each node.", "# Get the data to be set if undo is", "parent_path=parent_path) new_path = nxt_path.join_node_paths(parent_path, new_name) self.new_path = new_path super(InstanceNode, self).__init__(new_path,", "model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage = model.stage", "to {}\".format(self.old_node_path, self.return_value)) class DuplicateNodes(NxtCommand): \"\"\"Duplicate nodes on this graph\"\"\"", "if cur_cmd is self: return if layer_just_saved in self._layers_effected_by_me: if", "attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path) #", "self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) == 1: path_str = self.node_paths[0] else: path_str", "class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self, node_path, name, model, layer_path):", "SAVE_KEY.ALIAS: self.file_name } new_layer = self.stage.new_sublayer(layer_data=layer_data, idx=self.insert_idx) self.new_layer_path = new_layer.real_path", "our created empty nodes for node_path in self.created_node_paths: n =", "layer_path): \"\"\"Adds layer to the model's set of effected (unsaved)", "an existing layer\"\"\" def __init__(self, layer_path, model): super(SoloToggleLayer, self).__init__(model) 
self.layer_path", "= self.prev_selection def redo(self): self.created_node_paths = [] super(RevertNode, self).redo() layer", "self.old_color = '' self.model = model self.stage = model.stage @processing", "+= new_paths # self.model.node_added.emit(node_path) # preserve original data node_data['data'] =", "KeyError: # Removed by a save action pass self._layers_effected_by_me[layer_path] =", "return if not eff_by_undo: self._layers_effected_by_me[layer_path] = {'undo': False, 'redo': True}", "attribute value\"\"\" def __init__(self, node_path, attr_name, data, model, layer_path): super(SetNodeAttributeData,", "layer {}\".format(self.new_layer_path)) class ReferenceLayer(NxtCommand): \"\"\"Refernce existing layer\"\"\" def __init__(self, file_path,", "= new_path super(InstanceNode, self).__init__(new_path, INTERNAL_ATTRS.INSTANCE_PATH, node_path, model, target_layer_path) def redo(self):", "self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute", "list must be shared by other DeleteNode commands in a", "eff_by_undo, eff_by_redo = self._get_effects(layer_just_saved) where_were_at = self.model.undo_stack.index() cur_cmd = self.model.undo_stack.command(max(0,", "super(SetNodeEnabledState, self).__init__(node_path, INTERNAL_ATTRS.ENABLED, value, model, layer_path) def redo(self): super(SetNodeEnabledState, self).redo()", "as an empty node new_nodes, new_paths, dirty = _add_node_hierarchy(self.node_path, self.model,", "curr_selection + paths super(AddSelection, self).__init__(new_paths, model) def redo(self): super(AddSelection, self).redo()", "= parent_node_path self.parent_node = None self.model = model self.stage =", "or [0.0, 0.0] self.prev_selection = self.model.selection # resulting node self.node_path", "= self.stage.add_node(name=name, data=attr_data, parent=parent_path, layer=layer.layer_idx(), comp_layer=comp, fix_names=False) # Fixme: Targeted", "eff_by_redo = False 
self.model.effected_layers.add(layer_path) elif eff_by_redo: try: self.model.effected_layers.remove(layer_path) except KeyError:", "different ways. :param layer_path: string of layer real path :return:", "\"\"\" eff_by_undo, eff_by_redo = self._get_effects(layer_path) layer_saved = layer_path not in", "don't want to fix names because we know this node", "self.remove_attr = True if not created_node: self.return_value = self.stage.node_setattr_data(node, self.attr_name,", "in self.node_paths: node = layer.lookup(node_path) top_node = self.stage.get_top_node(node, self.model.target_layer) if", "INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None self.prev_selection = model.selection @processing def undo(self):", "layer_color_index[0] for c in layer_color_index: if c not in open_layer_colors:", "layer :param model: StageModel \"\"\" super(SetLayerColor, self).__init__(model) self.layer_path = layer_path", "= data.get('attributes', {}) attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if", "a top layer the top layer store an overrides. 
:param", "attr_name, value, model, layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path,", "def redo(self): layer = self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_color", "layer_path, model): super(SetLayerAlias, self).__init__(model) self.layer_path = layer_path self.alias = alias", "self.layer_path = layer_path self.new_positions = node_positions self.old_positions = {} for", "common_parent_nodes = {} for old_path, node_data in self.prev_node_data.items(): prev_parent_path =", "= model.stage @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) if layer", "was saved and then undo was called, thus this redo", "= getattr(node, INTERNAL_ATTRS.NAME) is_break = self.model.get_is_node_breakpoint(self.node_path, layer) self.node_data = {'parent':", "model, layer_path, others): super(RevertNode, self).__init__(node_path, model, layer_path, others) self.rebuild =", "self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] #", "changed = tuple([self.node_path] + self.created_node_paths) self.model.nodes_changed.emit(changed) self.model.selection = self.prev_selection #", "undo(self): super(RemoveLayer, self).redo() self.setText(self.text) @processing def redo(self): super(RemoveLayer, self).undo() self.setText(self.text)", "paths, model): super(SetSelection, self).__init__() self.new_paths = paths self.model = model", "layer=layer.layer_idx(), comp_layer=comp_layer, fix_names=False) if self.node_data['break']: self.model._add_breakpoint(self.node_path, layer) self.model._add_breakpoint(self.node_path, self.stage.top_layer) if", "so when # they are un-parented each node can be", "model): super(SoloToggleLayer, self).__init__(model) self.layer_path = layer_path self.model = model self.layer_paths", "self.prev_node_data[node_path] = node_data nodes += [node] # get current node", "the stage as it deletes node, this behavior is depended", "True 
self.created_node_paths += [self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node,", "data self.parent_path = parent_path self.layer_path = layer_path self.stage = model.stage", "self._layers_effected_by_me[layer_path]['redo'] except KeyError: pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved):", "[self.node_path] if node is None: parent_path = nxt_path.get_parent_path(self.node_path) name =", "self.new_node_paths: old_node_path = self.node_paths[idx] attr_state = self.model.remove_attr_display_state(old_node_path) if attr_state is", "wrapper class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model =", "in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer =", "layer) idx = 0 for old_node_path in self.node_paths: new_node_path =", "= {} self.recomp = attr_name in INTERNAL_ATTRS.REQUIRES_RECOMP self.return_value = None", "comp_layer = self.model.comp_layer self.node_data = {} self.prev_starts = self.model.get_start_nodes(layer) self.prev_breaks", "__init__(self, node_path, child_order, model, layer_path): super(SetNodeChildOrder, self).__init__(node_path, INTERNAL_ATTRS.CHILD_ORDER, child_order, model,", "self.setText(\"Enabled {}\".format(self.node_path)) else: self.setText(\"Disabled {}\".format(self.node_path)) class SetNodeCollapse(NxtCommand): \"\"\"Set the node", "breakpoint to {}\".format(path_str)) else: self.setText(\"Remove breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand):", "def undo(self): layer = self.model.lookup_layer(self.layer_path) for node_path, old_pos in self.old_positions.items():", "node_path, prev_value in self.prev_values.items(): layer.collapse[node_path] = prev_value self.model.comp_layer.collapse[node_path] = prev_value", "of effected (unsaved) layers. 
If the layer is not marked", "np in self.node_paths: self.prev_values[np] = self.model.get_node_collapse(np, layer) for node_path in", "InstanceNode(SetNodeAttributeValue): \"\"\"Instance nodes on this graph\"\"\" def __init__(self, node_path, model,", "\"\"\"Set this node as the execution start point\"\"\" def __init__(self,", "closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor, INTERNAL_ATTRS.CHILD_ORDER) self.node_data['ancestor_child_order'] = (closest_ancestor_path, ancestor_child_order[:]) #", "+= [self.node_path] node = layer.lookup(self.node_path) self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer,", "\"\"\"Set node instance\"\"\" def __init__(self, node_path, instance_path, model, layer_path): super(SetNodeInstance,", "def __init__(self, node_path, model, layer_path): super(RevertCompute, self).__init__(node_path, INTERNAL_ATTRS.COMPUTE, [], model,", "self.prev_data = self.stage.get_node_attr_data(node, self.attr_name, layer, quiet=True) if self.prev_data: self.prev_data =", "self.undo_effected_layer(layer.real_path) def redo(self): # Overload remove attr here to insure", "str(self.node_paths) self.setText(\"Parent {} to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an", "{}\".format(attr_path)) class SetCompute(SetNodeAttributeValue): \"\"\"Set node code value\"\"\" def __init__(self, node_path,", "self.data.get(META_ATTRS.VALUE))) self.setText(txt) class SetNodeEnabledState(SetNodeAttributeValue): \"\"\"Set node enabled state\"\"\" def __init__(self,", "break point\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeBreakPoint, self).__init__()", "0 for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state =", "self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: self.old_alias = layer.get_alias(local=True) layer.set_alias(self.alias) else:", "= self.stage.node_setattr_data(node=n, 
attr=self.attr_name, layer=layer, create=False, comp_layer=comp, **self.prev_data) if self.attr_name ==", "self.model.layer_mute_changed.emit((self.layer_path,)) self.setText(\"Toggle {} muted.\".format(layer.get_alias())) class SoloToggleLayer(NxtCommand): \"\"\"Toggles soloing an existing", "= node_path self.node_data = {} self.others = other_removed_nodes @processing def", "= nxt_path.node_name_from_node_path(self.node_path) if self.attr_name in INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data", "for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) if len(self.node_paths) ==", "= file_name self.chdir = chdir @processing def undo(self): new_layer =", "self.old_positions[path] = model.get_node_pos(path) @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) for", "sub_layer_count = len(self.stage._sub_layers) if 0 < self.insert_idx <= sub_layer_count: parent_layer", "source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path, prev_pos, layer=source_layer) # delete any created", "self.node_data['start']: self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if", "= {} for path in self.new_positions.keys(): self.old_positions[path] = model.get_node_pos(path) @processing", "a given layer with context to this command. 
Since a", "super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance path to {}\".format(self.node_path)) class RevertInstancePath(SetNodeAttributeValue): def", "= layer_path self.alias = alias self.old_alias = '' self.model =", "new_layer = self.model.lookup_layer(self.new_layer_path) if new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.update_comp_layer(rebuild=True)", "comp_layer = self.model.comp_layer if node is not None: # delete", "dirty = _add_node_hierarchy(self.node_path, self.model, layer) self.created_node_paths += new_paths self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.model.selection", "self.node_data['save_dict'] = get_node_as_dict(node) if self.node_data['break']: self.model._remove_breakpoint(self.node_path, layer) self.model._remove_breakpoint(self.node_path, self.stage.top_layer) if", "store an overrides. :param color: string of new layer alias", "ancestor_path = parent_path child_order = [] if parent_node: child_order =", "= [] dirty_nodes = [] nodes, dirty = self.stage.add_node(name=self.name, data=self.data,", "all child nodes for undo self.prev_node_data = {} @processing def", "= None closest_ancestor_path = layer.get_node_path(closest_ancestor) if closest_ancestor_path: ancestor_child_order = getattr(closest_ancestor,", "n = layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=layer,", "new_paths # self.model.node_added.emit(node_path) # preserve original data node_data['data'] = get_node_as_dict(target_node)", "remove_layer_data=False) super(RevertNode, self).undo() self.model.update_comp_layer(rebuild=True) self.model.selection = self.prev_selection def redo(self): self.created_node_paths", "self.model._remove_start_node(self.node_path, layer) node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx()", "self.value: 
self.setText(\"Add breakpoint to {}\".format(path_str)) else: self.setText(\"Remove breakpoint from {}\".format(path_str))", "self.parent_node_path != nxt_path.WORLD: prev_pos = node_data['pos'] source_layer = self.stage.get_node_source_layer(node) self.model._set_node_pos(old_path,", "self.others[:]: self.others += comp_layer.get_node_dirties(p) _, dirty = self.stage.delete_node(node, layer, comp_layer=comp_layer,", "{} to {}\".format(path_str, self.parent_node_path)) class AddAttribute(SetNodeAttributeData): \"\"\"Add an attribute to", "# original top node. node_hierarchy_data = {} if self.parent_node_path is", "{}\".format(self.node_path)) class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): inst_path =", "of layer real path :return: None \"\"\" layer_unsaved = layer_path", "list(self.node_path_data.values()) idx = 0 for new_node_path in self.new_node_paths: old_node_path =", "None layer_data = nxt_io.load_file_data(self.real_path) extra_data = {\"parent_layer\": parent_layer, \"filepath\": self.file_path,", "\"\"\"Set node enabled state\"\"\" def __init__(self, node_path, value, model, layer_path):", "a node\"\"\" def __init__(self, node_path, attr_name, model, layer_path): super(DeleteAttribute, self).__init__(node_path,", "index before this command and the same # layer was", "layer) target_node = new_nodes[-1] self.created_node_paths += new_paths # self.model.node_added.emit(node_path) #", "= [] source_layer = self.model.lookup_layer(self.source_layer_path) target_layer = self.model.lookup_layer(self.target_layer_path) self.redo_effected_layer(target_layer.real_path) for", "self.model.selection def undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection =", "= copy.deepcopy(self.prev_data) # set attribute value this also adds the", "ancestors[0] ancestor_path = layer.get_node_path(ancestor) child_order = self.stage.get_node_child_order(ancestor) 
node_data[INTERNAL_ATTRS.CHILD_ORDER] = [ancestor_path,", "= 0 for new_node_path in self.new_node_paths: old_node_path = self.node_paths[idx] attr_state", "class RenameAttribute(NxtCommand): \"\"\"Rename attribute\"\"\" def __init__(self, node_path, attr_name, new_attr_name, model,", "= nxt_path.make_attr_path(self.node_path, self.nice_attr_name) val = str(self.data.get(META_ATTRS.VALUE)) self.setText(\"Set {} to {}\".format(attr_path,", "collapse state\"\"\" def __init__(self, node_paths, value, model, layer_path): super(SetNodeCollapse, self).__init__(model)", "self.setText('Add {} to selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model):", "= self.model.target_layer for node_path in self.node_paths: node_data = {} display_node", "layer_path self.prev_breaks = [] @processing def undo(self): user_dir.breakpoints[self.layer_path] = self.prev_breaks", "the model's set of effected (unsaved) layers. If this command", "model, layer_path): super(SetNodeCollapse, self).__init__(model) self.node_paths = node_paths self.value = value", "of this command effects the layer not the redo eff_by_redo", "data, model, layer_path) class RenameNode(SetNodeAttributeValue): \"\"\"Rename node\"\"\" def __init__(self, node_path,", "self.model.get_node_pos(node_path) pos = [pos[0] + 20, pos[1] + 20] self.model._set_node_pos(new_node_path,", "Nodes and Connections\"\"\" def __init__(self, paths, model): super(SetSelection, self).__init__() self.new_paths", "for node_path in self.node_paths: func(node_path, layer) self.model.nodes_changed.emit(tuple(self.node_paths)) @processing def redo(self):", "first to effect the layer we mark it as such", "layer, comp_layer=comp_layer, remove_layer_data=rm_layer_data, other_removed_nodes=self.others) dirty_nodes += dirty + [self.node_path] if", "self.setText(\"Revert exec input on {}\".format(self.node_path)) class RevertNode(DeleteNode): \"\"\"Localize nodes\"\"\" def", "self.old_node_path = 
node_path layer = model.lookup_layer(layer_path) parent_path = nxt_path.get_parent_path(node_path) new_name", "parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node = None", "model, source_layer_path, target_layer_path): # TODO: We should make another base", "= self.model.selection def undo(self): self.model.selection = self.prev_paths def redo(self): self.model.selection", "model, layer_path): data = {META_ATTRS.VALUE: value} super(SetNodeAttributeValue, self).__init__(node_path, attr_name, data,", "nodes for new_node in new: # add new node path", "selection: {}'.format(str(self.new_paths))) class AddSelection(SetSelection): def __init__(self, paths, model): self.added_paths =", "= attr_name self.data = data self.stage = model.stage self.layer_path =", "new_path in list(self.model.top_layer.positions.keys()): source_layer = self.stage.get_node_source_layer(node) source_layer.positions.pop(new_path) for parent_path, nodes_dict", "layer_path): super(SetNodeBreakPoint, self).__init__() self.node_paths = node_paths self.value = value self.model", "comp_layer.lookup(nn_p) if display_node is not None: display_child_order = getattr(display_node, INTERNAL_ATTRS.CHILD_ORDER)", "INTERNAL_ATTRS.ALL: self.return_value = INTERNAL_ATTRS.as_save_key(self.attr_name) attr_data = {self.return_value: self.data.get(META_ATTRS.VALUE)} else: attr_data", "def undo(self): layer = self.model.lookup_layer(self.layer_path) # Remove our created empty", "self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer, self.attr_name,", "class NxtCommand(QUndoCommand): def __init__(self, model): super(NxtCommand, self).__init__() self.model = model", "self.created_node_paths = [] # get the node node = layer.lookup(self.node_path)", "nodes_str = 'nodes' self.setText('Move {} {}'.format(nodes_str, delta_str)) 
self.redo_effected_layer(layer.real_path) class SetSelection(QUndoCommand):", "to {}\".format(path_str)) else: self.setText(\"Remove breakpoint from {}\".format(path_str)) class ClearBreakpoints(QUndoCommand): \"\"\"Clear", "self.model.target_layer.lookup(node_path) if n is not None: self.stage.delete_node(n, layer=self.model.target_layer, remove_layer_data=False) layers", "False, 'redo': True} self.model.effected_layers.add(layer_path) else: # Layer was saved and", "super(SetSelection, self).__init__() self.new_paths = paths self.model = model self.prev_paths =", "model, layer_path): super(DeleteAttribute, self).__init__(node_path, attr_name, None, model, layer_path) # Get", "prev_name: self.stage.set_node_name(node, name=prev_name, layer=layer, force=True) # restore position if self.parent_node_path", "[] layer = self.model.target_layer self.redo_effected_layer(layer.real_path) for node_path in self.node_paths: node", "attr_data = {nxt_io.SAVE_KEY.ATTRS: {self.attr_name: self.data}} self.return_value = self.attr_name _, dirties", "= fix_selection self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.redo_effected_layer(layer.real_path) self.setText(\"Delete node: {}\".format(self.node_path)) class SetNodeAttributeData(NxtCommand): \"\"\"Set", "graph\"\"\" def __init__(self, node_path, model, source_layer_path, target_layer_path): src_name = nxt_path.node_name_from_node_path(node_path)", "self.node_paths[0] else: path_str = str(self.node_paths) self.setText('Localize {}'.format(str(path_str))) class LocalizeUserAttr(SetNodeAttributeData): \"\"\"Localize", "self.created_node_paths = [] @processing def undo(self): layer = self.model.lookup_layer(self.layer_path) dirty_nodes", "INTERNAL_ATTRS.CHILD_ORDER, ancestor_child_order) self.model.selection = self.prev_selection # Fixme: Does not account", "in open_layer_colors: layer_color = c break real_path = nxt_path.full_file_expand(self.file_path, start=self.chdir)", "self.prev_selection # Fixme: Does not account 
for rebuilding proxy nodes", "layer_color_index = [str(k.name()) for k in colors.LAYER_COLORS] open_layer_colors = []", "self.node_path_data = {} self.new_node_paths = [] self.created_node_paths = [] nodes", "file_path, idx, model, chdir): super(ReferenceLayer, self).__init__(model) self.model = model self.stage", "self.model.lookup_layer(self.layer_path) if layer is self.model.top_layer: layer.color = self.old_color else: layer.set_color_over(self.old_color)", "[self.node_path] if self.model.get_is_node_start(self.node_path, self.model.comp_layer): self.model.starts_changed.emit(self.model.get_start_nodes()) self.setText(\"{} renamed to {}\".format(self.old_node_path, self.return_value))", "each build once self.model.update_comp_layer(rebuild=True) self.model.set_target_layer(self.new_layer_path) self.model.layer_added.emit(self.new_layer_path) self.setText(\"New layer {}\".format(self.new_layer_path)) class", "self).__init__(new_selection, model) def redo(self): super(RemoveFromSelection, self).redo() self.setText('Remove {} from selection'.format(self.rem_paths))", "reset the class attr `_first_effected_by_redo` to False. 
This makes sure", "self.old_node_path self.model.selection = [self.node_path] def redo(self): self.model.about_to_rename.emit() super(RenameNode, self).redo() self.node_path", "LocalizeExecPath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath,", "start point\"\"\" def __init__(self, node_path, value, model, layer_path): super(SetNodeStartPoint, self).__init__(node_path,", "= self.prev_selection @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.created_node_paths =", "getattr(node, INTERNAL_ATTRS.PARENT_PATH) self.stage.get_node_data(node, layer) node_data = self.stage.get_node_data(node, layer) node_data['pos'] =", "on {}\".format(self.node_path)) class SetNodeComment(SetNodeAttributeValue): \"\"\"Set node comment\"\"\" def __init__(self, node_path,", "None: _, dirty = self.stage.delete_node(node, layer, remove_layer_data=False) dirty_nodes += dirty", "the layer path and the comp layer. It is important", "for {}\".format(self.node_path)) return self.setText(\"Set {} exec input to {}\".format(self.node_path, val))", "the top layer store an overrides. 
:param color: string of", "node: {}'.format(self.node_path)) class DeleteNode(NxtCommand): def __init__(self, node_path, model, layer_path, other_removed_nodes):", "# net zero effect on the layer try: self.model.effected_layers.remove(layer_path) except", "eff_by_undo = False else: # Now the undo of this", "None: self.model._set_attr_display_state(self.node_path, attr_display) user_dir.breakpoints = self.prev_breaks ancestor_tuple = self.node_data.get('ancestor_child_order') if", "attrs_to_keep = apply_data['attributes'].keys() apply_data['enabled'] = data.get('enabled') if data.get('instance'): apply_data['instance'] =", "except KeyError: # Removed by a save action pass self._layers_effected_by_me[layer_path]", "nodes_dict in common_parent_nodes.items(): for node, old_path in nodes_dict.items(): node_data =", "self.model.layer_removed.emit(self.real_path) @processing def redo(self): sub_layer_count = len(self.stage._sub_layers) if 0 <", "else: # Layer was saved and then undo was called,", "if ancestor_tuple: ancestor_path, ancestor_child_order = ancestor_tuple ancestor = layer.lookup(ancestor_path) if", "model): self.rem_paths = paths new_selection = model.selection[:] for path in", "layer_path): data = {META_ATTRS.VALUE: value} super(AddAttribute, self).__init__(node_path, attr_name, data, model,", "class LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path,", "LocalizeInstancePath(SetNodeAttributeValue): def __init__(self, node_path, model, layer_path): inst_path = model.get_node_instance_path(node_path, model.comp_layer,", "for layer_path in self.layer_paths: self.redo_effected_layer(layer_path) @processing def toggle_state(self): layer =", "not layer.get_soloed(local=False) self.model.top_layer.set_solo_over(layer.filepath, state) self.layer_paths.append(self.model.top_layer.real_path) self.model.update_comp_layer(rebuild=True) 
self.model.layer_solo_changed.emit((self.layer_path,)) self.setText(\"Toggle {} soloed.\".format(layer.get_alias()))", "[n] return new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time =", "import LAYERS, SAVE_KEY from nxt.nxt_node import (INTERNAL_ATTRS, META_ATTRS, get_node_as_dict, list_merger)", "+= self.created_node_paths dirty_nodes += [self.node_path] self.undo_effected_layer(self.layer_path) self.model.nodes_changed.emit(tuple(set(dirty_nodes))) self.model.selection = self.prev_selection", "for node_path in self.new_node_paths: n = target_layer.lookup(node_path) if n is", "node_paths, parent_node_path, model): super(ParentNodes, self).__init__(model) self.parent_node_path = parent_node_path self.parent_node =", "cur_cmd = self.model.undo_stack.command(max(0, where_were_at - 1)) if cur_cmd is self:", "= 0 for old_node_path in self.node_paths: new_node_path = self.new_node_paths[idx] attr_state", "= self.new_node_paths[idx] attr_state = self.model.remove_attr_display_state(new_node_path) if attr_state is not None:", "def reset_layer_effected(self, layer_just_saved): \"\"\"When the model marks a layer as", "changed_attrs += (attr_path,) if self.recomp: self.model.update_comp_layer(rebuild=self.recomp) else: if (self.remove_attr or", "layer_path) def redo(self): super(RevertInstancePath, self).redo() self.setText(\"Revert instance path on {}\".format(self.node_path))", "self.stage.transfer_node_data(target_node, self.model.target_layer, display_node, self.model.comp_layer) self.prev_node_data[node_path] = node_data self.model.update_comp_layer(rebuild=bool(self.created_node_paths)) self.redo_effected_layer(layer.real_path) self.model.selection", "if not display_node: continue # add node if it doesn't", "model, layer_path): exec_path = model.get_node_exec_in(node_path) super(LocalizeExecPath, self).__init__(node_path, INTERNAL_ATTRS.EXECUTE_IN, exec_path, model,", "parent_node: child_order = getattr(parent_node, INTERNAL_ATTRS.CHILD_ORDER) 
else: ancestors = layer.ancestors(node_path) if", "layer_path) def redo(self): super(LocalizeCompute, self).redo() self.setText(\"Localize compute on {}\".format(self.node_path)) class", "in self.stage._sub_layers: color = layer.color if color: color = color.lower()", "nodes dirty_set = tuple(set(dirty)) self.undo_effected_layer(self.layer_path) if dirty_set != (self.node_path,): self.model.update_comp_layer(rebuild=True)", "model, layer_path) class LocalizeCompute(SetNodeAttributeValue): \"\"\"Localize nodes\"\"\" def __init__(self, node_path, model,", "self.parent_path = parent_path self.layer_path = layer_path self.stage = model.stage #", "layer in redo... self.undo_effected_layer(self.stage.top_layer.real_path) attr_display = self.node_data.get('attr_display') if attr_display is", "self.model.selection = new_selection self.model.update_comp_layer(rebuild=True) if len(self.node_paths) == 1: nodes_str =", "pass return first_eff_by_undo, first_eff_by_redo def reset_layer_effected(self, layer_just_saved): \"\"\"When the model", "= layer.lookup(prev_parent_path) new_path = self.node_path_data[old_path] node = layer.lookup(new_path) if prev_parent_path", "logger.debug(\"Undo \" + cmd.text() + \" | \" + update_time", "setting the class attr `_first_effected_by_redo` to True. 
:param layer_path: string", "class SetNodesPosition(NxtCommand): \"\"\"Move nodes\"\"\" def __init__(self, node_positions, model, layer_path): super(SetNodesPosition,", "attr_name, model, layer_path): node = model.comp_layer.lookup(node_path) data = model.stage.get_node_attr_data(node, attr_name,", "def __init__(self, node_path, model, layer_path): comp_layer = model.comp_layer display_node =", "self.new_attr_name, self.attr_name) self.undo_effected_layer(layer.real_path) @processing def redo(self): layer = self.model.lookup_layer(self.layer_path) self.rename_attribute(layer,", "selection'.format(self.added_paths)) class RemoveFromSelection(SetSelection): def __init__(self, paths, model): self.rem_paths = paths", "model.stage self.insert_idx = idx self.file_path = file_path self.file_name = file_name", "# was moved to an index before this command and", "other_removed_nodes): \"\"\"Delete node from the layer at the layer path", "INTERNAL_ATTRS.EXECUTE_IN, exec_path, model, layer_path) def redo(self): super(LocalizeExecPath, self).redo() self.setText(\"Localize exec", "+= [n] return new_nodes, new_node_paths, dirty def undo_debug(cmd, start): update_time", "new_layer in self.stage._sub_layers: self.undo_effected_layer(new_layer.parent_layer.real_path) self.stage.remove_sublayer(new_layer) self.model.set_target_layer(LAYERS.TOP) self.model.update_comp_layer(rebuild=True) self.model.layer_removed.emit(self.real_path) @processing def", "+= dirty node = layer.lookup(self.node_path) source_layer = self.stage.get_node_source_layer(node) if source_layer.layer_idx()", "layer we mark it as such by setting the class", "< self.insert_idx <= sub_layer_count: parent_layer = self.stage._sub_layers[self.insert_idx - 1] self.redo_effected_layer(parent_layer.real_path)", "child nodes for undo self.prev_node_data = {} @processing def undo(self):", "effect layers in different ways. 
:param layer_path: string of layer", "in new: # add new node path to the list", "def redo(self): super(SetNodeExecuteSources, self).redo() val = self.data.get(META_ATTRS.VALUE) if val is", "INTERNAL_ATTRS.INSTANCE_PATH, inst_path, model, layer_path) def redo(self): super(LocalizeInstancePath, self).redo() self.setText(\"Localize instance", "`_first_effected_by_redo` to True. :param layer_path: string of layer real path" ]
[ "results') def get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`,", "= kwargs['token'] except: # 400 Bad Request (Missing Token) return", "data['plz'] = city.plz data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk']", "message): # 4xx Bad Request / Unauthorized / Not Found", "no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT", "see license.txt from __future__ import unicode_literals import frappe from frappe.model.document", "call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz '''", "city.plz data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district", "\"\"\".format(kanton=kanton), as_dict=True) if len(search) > 0: result = search[0] else:", "= '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search) > 0: result =", "Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead = no_letterhead)", "AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS", "`Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS", "`district`, `canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY", "from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): '''", "import save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call on", "schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf", "400 Bad Request (Missing Token) return raise_4xx(400, 'Bad Request', 'Token", "= 
frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output =", "AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent`", "for city in city_results: data = {} data['plz'] = city.plz", "`Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration", "pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month,", "__future__ import unicode_literals import frappe from frappe.model.document import Document from", "ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter:", "as_pdf = True, output = output, no_letterhead = no_letterhead) pdf", "'Bad Request', 'PLZ/City Required') answer = [] # lookup for", "`schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage`", "data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`,", "# 401 Unauthorized (Invalid Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token')", "= frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in", "ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: # lookup for", "`schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben`", "len(answer) > 0: return raise_200(answer) else: # 404 Not Found", "if len(city_results) > 0: for city in 
city_results: data =", "data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT", "Request / Unauthorized / Not Found return ['{code} {title}'.format(code=code, title=title),", "that token is present try: token = kwargs['token'] except: #", "AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM", "4xx Bad Request / Unauthorized / Not Found return ['{code}", "and contributors # For license information, please see license.txt from", "`tabKantonsinformationen` WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search) >", "Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\",", "FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS", "# For license information, please see license.txt from __future__ import", "city in city_results: data = {} data['plz'] = city.plz data['ort']", "(c) 2021, libracore AG and contributors # For license information,", "raise_4xx(400, 'Bad Request', 'Token Required') # check that token is", "Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output =", "except: # 400 Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad", "get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1):", "from frappe.model.document import Document from datetime import datetime from PyPDF2", "PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass", "Found return ['{code} {title}'.format(code=code, title=title), { \"error\": { \"code\": code,", "= True, output = output, no_letterhead 
= no_letterhead) output =", "token - plz ''' # check that token is present", "Request', 'Token Required') # check that token is correct if", "# check that plz_city is present try: plz_city = kwargs['plz_city']", "}] def raise_200(answer): return ['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1):", "`tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city),", "on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz ''' #", "`city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%'", "def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM", "Found', 'No results') def get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT `informationen`,", "_get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token -", "`tabArbitration Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration", "def raise_200(answer): return ['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf,", "from __future__ import unicode_literals import frappe from frappe.model.document import Document", "= output, no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now =", "# 400 Bad Request (Missing Token) return raise_4xx(400, 'Bad Request',", "= {} data['plz'] = city.plz data['ort'] = city.city data['gemeinde'] =", "`name` FROM `tabArbitration Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output", "information, please see license.txt from __future__ import unicode_literals import frappe", "no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts", "{} return result def raise_4xx(code, title, message): # 4xx 
Bad", "ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) > 0:", "correct if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401", "\"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0: return raise_200(answer) else:", "(Missing Token) return raise_4xx(400, 'Bad Request', 'Token Required') # check", "Document from datetime import datetime from PyPDF2 import PdfFileWriter from", "raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that plz_city is present", "= get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon`", "Parameter: - token - plz ''' # check that token", "'Token Required') # check that token is correct if not", "Found', 'No results') else: # 404 Not Found return raise_4xx(404,", "raise_4xx(code, title, message): # 4xx Bad Request / Unauthorized /", "SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode` =", "ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) < 1:", "/ Unauthorized / Not Found return ['{code} {title}'.format(code=code, title=title), {", "{ \"code\": code, \"message\": \"{message}\".format(message=message) } }] def raise_200(answer): return", "return ['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden", "result = search[0] else: result = {} return result def", "in city_results: data = {} data['plz'] = city.plz data['ort'] =", "-*- # Copyright (c) 2021, libracore AG and contributors #", "`schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer)", "license information, please see license.txt from 
__future__ import unicode_literals import", "Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON", "400 Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City", "= \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name, pdf,", "= no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts =", "else: # 404 Not Found return raise_4xx(404, 'Not Found', 'No", "'%{plz_city}%' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) >", "check that token is present try: token = kwargs['token'] except:", "output, no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now()", "AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True)", "schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output,", "file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name, pdf, '', '', is_private=1) return", "\"message\": \"{message}\".format(message=message) } }] def raise_200(answer): return ['200 OK', answer]", "Not Found return ['{code} {title}'.format(code=code, title=title), { \"error\": { \"code\":", "**{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden =", "save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb", "return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer = [] #", "output = output, no_letterhead = no_letterhead) output = frappe.get_print(\"Arbitration Authority\",", "SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS 
`Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`,", "\"code\": code, \"message\": \"{message}\".format(message=message) } }] def raise_200(answer): return ['200", "frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output,", "frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output", "def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token", "AG and contributors # For license information, please see license.txt", "except: # 400 Bad Request (Missing Token) return raise_4xx(400, 'Bad", "get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS", "OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead':", "queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output =", "license.txt from __future__ import unicode_literals import frappe from frappe.model.document import", "= True, output = output, no_letterhead = no_letterhead) pdf =", "Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead", "[IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz ''' # check", "token is correct if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'):", "= frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE", "get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM", "libracore AG and 
contributors # For license information, please see", "`Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`,", "`district`, `canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY", "now = datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name =", "For license information, please see license.txt from __future__ import unicode_literals", "not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid", "401 Unauthorized (Invalid Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token') #", "now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name, pdf, '', '',", "return raise_4xx(404, 'Not Found', 'No results') else: # 404 Not", "frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid Token) return raise_4xx(401,", "city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton'] = city.canton", "Bad Request / Unauthorized / Not Found return ['{code} {title}'.format(code=code,", "''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: - token - plz", "return ['{code} {title}'.format(code=code, title=title), { \"error\": { \"code\": code, \"message\":", "datetime import datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import", "kwargs['plz_city'] except: # 400 Bad Request (Missing PLZ/City) return raise_4xx(400,", "`canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY `city`", "`municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER", "FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY `city` ASC", "plz ''' # check that token is present try: token", "city.municipality data['bezirk'] = city.district data['kanton'] = city.canton 
data['allgemein'] = get_informations(city.canton)", "\"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name, pdf, '',", "try: plz_city = kwargs['plz_city'] except: # 400 Bad Request (Missing", "for city city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton`", "lookup for plz city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`,", "def get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare`", "contributors # For license information, please see license.txt from __future__", "return raise_4xx(400, 'Bad Request', 'Token Required') # check that token", "\"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: # lookup for city", "raise_4xx(404, 'Not Found', 'No results') else: # 404 Not Found", "frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name", "['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF',", "# 404 Not Found return raise_4xx(404, 'Not Found', 'No results')", "WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) >", "data['bezirk'] = city.district data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde']", "check that plz_city is present try: plz_city = kwargs['plz_city'] except:", "frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True, output", "- plz ''' # check that token is present try:", "len(city_results) > 0: for city in city_results: data = {}", "'token'): # 401 Unauthorized (Invalid Token) 
return raise_4xx(401, 'Unauthorized', 'Invalid", "WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search) > 0:", "frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM `tabKantonsinformationen` WHERE `kanton`", "title, message): # 4xx Bad Request / Unauthorized / Not", "SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `city` LIKE", "no_letterhead = no_letterhead) output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für", "import frappe from frappe.model.document import Document from datetime import datetime", "404 Not Found return raise_4xx(404, 'Not Found', 'No results') def", "kwargs['token'] except: # 400 Bad Request (Missing Token) return raise_4xx(400,", "1: # lookup for city city_results = frappe.db.sql(\"\"\" SELECT `city`,", "= city.municipality data['bezirk'] = city.district data['kanton'] = city.canton data['allgemein'] =", "'Datenüberprüfung', as_pdf = True, output = output, no_letterhead = no_letterhead)", "Mandatory Parameter: - token - plz ''' # check that", "data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`,", "import Document from datetime import datetime from PyPDF2 import PdfFileWriter", "try: token = kwargs['token'] except: # 400 Bad Request (Missing", "`Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table`", "as_dict=True) if len(search) > 0: result = search[0] else: result", "# lookup for plz city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`,", "(Invalid Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that", "PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer = []", "Required') answer = [] # lookup for plz city_results =", "{ \"error\": { \"code\": code, \"message\": \"{message}\".format(message=message) } 
}] def", "present try: token = kwargs['token'] except: # 400 Bad Request", "return result def raise_4xx(code, title, message): # 4xx Bad Request", "token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid Token)", "`Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische", "2021, libracore AG and contributors # For license information, please", "True, output = output, no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output)", "return raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that plz_city is", "'PLZ/City Required') answer = [] # lookup for plz city_results", "is correct if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): #", "frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden:", "from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document):", "# 400 Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request',", "AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality", "Token') # check that plz_city is present try: plz_city =", "# -*- coding: utf-8 -*- # Copyright (c) 2021, libracore", "frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `city`", "= '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0: return", "token = kwargs['token'] except: # 400 Bad Request (Missing Token)", "# lookup for city city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`,", "Unauthorized / Not Found return ['{code} {title}'.format(code=code, title=title), { \"error\":", "`city` ASC \"\"\".format(plz_city=plz_city), 
as_dict=True) if len(city_results) > 0: for city", "no_letterhead) output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf", "= city.plz data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk'] =", "`gesetzessammlung`, `formulare` FROM `tabKantonsinformationen` WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True)", "- token - plz ''' # check that token is", "= PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\", as_dict=True)", "import unicode_literals import frappe from frappe.model.document import Document from datetime", "Request', 'PLZ/City Required') answer = [] # lookup for plz", "Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer", "plz city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM", "= no_letterhead) output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden',", "for plz city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton`", "'No results') else: # 404 Not Found return raise_4xx(404, 'Not", "search[0] else: result = {} return result def raise_4xx(code, title,", "data = {} data['plz'] = city.plz data['ort'] = city.city data['gemeinde']", "= search[0] else: result = {} return result def raise_4xx(code,", "\"error\": { \"code\": code, \"message\": \"{message}\".format(message=message) } }] def raise_200(answer):", "< 1: # lookup for city city_results = frappe.db.sql(\"\"\" SELECT", "as_pdf = True, output = output, no_letterhead = no_letterhead) output", "`tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city),", "if len(answer) > 0: return raise_200(answer) else: # 404 Not", "schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\", 
as_dict=True) for schlichtungsbehoerde", "AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS", "answer.append(data) if len(answer) > 0: return raise_200(answer) else: # 404", "`schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority`", "code, \"message\": \"{message}\".format(message=message) } }] def raise_200(answer): return ['200 OK',", "\"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) > 0: for city in city_results:", "else: result = {} return result def raise_4xx(code, title, message):", "[] # lookup for plz city_results = frappe.db.sql(\"\"\" SELECT `city`,", "BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: #", "return raise_4xx(404, 'Not Found', 'No results') def get_informations(kanton): search =", "_get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration", "für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead =", "LIKE '%{plz_city}%' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results)", "job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter()", "JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` =", "Not Found return raise_4xx(404, 'Not Found', 'No results') def get_informations(kanton):", "= kwargs['plz_city'] except: # 400 Bad Request (Missing PLZ/City) return", "`Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`,", "= {} return result def 
raise_4xx(code, title, message): # 4xx", "plz_city is present try: plz_city = kwargs['plz_city'] except: # 400", "that token is correct if not token == frappe.db.get_single_value('mietrechtspraxis API',", "check that token is correct if not token == frappe.db.get_single_value('mietrechtspraxis", "as_dict=True) if len(city_results) > 0: for city in city_results: data", "Token) return raise_4xx(400, 'Bad Request', 'Token Required') # check that", "as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name,", "`city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) < 1: # lookup", "> 0: result = search[0] else: result = {} return", "ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) > 0: for city in", "'Not Found', 'No results') else: # 404 Not Found return", "# 4xx Bad Request / Unauthorized / Not Found return", "if len(search) > 0: result = search[0] else: result =", "data['ort'] = city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton']", "Copyright (c) 2021, libracore AG and contributors # For license", "= frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day)", "'{plz_city}' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) <", "> 0: for city in city_results: data = {} data['plz']", "PdfFileWriter from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs):", "''' # check that token is present try: token =", "pass def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory Parameter: -", "= frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM `tabKantonsinformationen` WHERE", "SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM 
`tabKantonsinformationen` WHERE `kanton` =", "Request (Missing Token) return raise_4xx(400, 'Bad Request', 'Token Required') #", "result = {} return result def raise_4xx(code, title, message): #", "utf-8 -*- # Copyright (c) 2021, libracore AG and contributors", "frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS", "'Invalid Token') # check that plz_city is present try: plz_city", "coding: utf-8 -*- # Copyright (c) 2021, libracore AG and", "datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts)", "answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead})", "Not Found return raise_4xx(404, 'Not Found', 'No results') else: #", "`elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde`", "Bad Request (Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required')", "`municipality`, `district`, `canton` FROM `tabPincode` WHERE `city` LIKE '%{plz_city}%' ORDER", "`city` LIKE '%{plz_city}%' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if", "datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file class", "as_dict=True) if len(city_results) < 1: # lookup for city city_results", "AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage`", "as_dict=True) answer.append(data) if len(answer) > 0: return raise_200(answer) else: #", "`formulare` FROM `tabKantonsinformationen` WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if", "['{code} 
{title}'.format(code=code, title=title), { \"error\": { \"code\": code, \"message\": \"{message}\".format(message=message)", "# Copyright (c) 2021, libracore AG and contributors # For", "no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now = datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year,", "`tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl`", "Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality),", "404 Not Found return raise_4xx(404, 'Not Found', 'No results') else:", "def raise_4xx(code, title, message): # 4xx Bad Request / Unauthorized", "{title}'.format(code=code, title=title), { \"error\": { \"code\": code, \"message\": \"{message}\".format(message=message) }", "raise_200(answer) else: # 404 Not Found return raise_4xx(404, 'Not Found',", "-*- coding: utf-8 -*- # Copyright (c) 2021, libracore AG", "Token) return raise_4xx(401, 'Unauthorized', 'Invalid Token') # check that plz_city", "len(city_results) < 1: # lookup for city city_results = frappe.db.sql(\"\"\"", "if not token == frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized", "raise_4xx(404, 'Not Found', 'No results') def get_informations(kanton): search = frappe.db.sql(\"\"\"", "Sammel-PDF', **{'no_letterhead': no_letterhead}) return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden", "return raise_200(answer) else: # 404 Not Found return raise_4xx(404, 'Not", "`informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM `tabKantonsinformationen` WHERE `kanton` = '{kanton}'", "AS `elektronische Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS", "result def raise_4xx(code, title, message): # 4xx Bad Request /", "city_results: data = {} data['plz'] = city.plz data['ort'] 
= city.city", "frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode`", "import datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager import save_file", "= city.city data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton'] =", "'{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search) > 0: result = search[0]", "= output, no_letterhead = no_letterhead) output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name,", "`canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY `city`", "schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True,", "= city.district data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] =", "city city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM", "> 0: return raise_200(answer) else: # 404 Not Found return", "`tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}'", "FROM `tabArbitration Authority`\"\"\", as_dict=True) for schlichtungsbehoerde in schlichtungsbehoerden: output =", "schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output = output, no_letterhead =", "# check that token is correct if not token ==", "0: for city in city_results: data = {} data['plz'] =", "Found return raise_4xx(404, 'Not Found', 'No results') def get_informations(kanton): search", "/ Not Found return ['{code} {title}'.format(code=code, title=title), { \"error\": {", "True, output = output, no_letterhead = no_letterhead) output = frappe.get_print(\"Arbitration", "PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\", as_dict=True) for", "`homepage`, `gesetzessammlung`, `formulare` FROM 
`tabKantonsinformationen` WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton),", "'Not Found', 'No results') def get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT", "`city`, `municipality`, `district`, `canton` FROM `tabPincode` WHERE `pincode` = '{plz_city}'", "ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name,", "that plz_city is present try: plz_city = kwargs['plz_city'] except: #", "city.district data['kanton'] = city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\"", "'No results') def get_informations(kanton): search = frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`,", "`schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS `Rechtsberatung`, `schlichtungsbehoerde`.`elektronische_eingaben` AS `elektronische Eingaben`,", "`schlichtungsbehoerde` LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE", "== frappe.db.get_single_value('mietrechtspraxis API', 'token'): # 401 Unauthorized (Invalid Token) return", "output, no_letterhead = no_letterhead) output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen", "`schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung`", "# check that token is present try: token = kwargs['token']", "} }] def raise_200(answer): return ['200 OK', answer] @frappe.whitelist() def", "output = output, no_letterhead = no_letterhead) pdf = frappe.utils.pdf.get_file_data_from_writer(output) now", "search = frappe.db.sql(\"\"\" SELECT `informationen`, `homepage`, `gesetzessammlung`, `formulare` FROM `tabKantonsinformationen`", "`geminendentbl` ON 
`schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data)", "0: result = search[0] else: result = {} return result", "title=title), { \"error\": { \"code\": code, \"message\": \"{message}\".format(message=message) } }]", "city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS", "please see license.txt from __future__ import unicode_literals import frappe from", "LEFT JOIN `tabMunicipality Table` AS `geminendentbl` ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality`", "class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call on [IP]/api/method/mietrechtspraxis.api.get_sb Mandatory", "\"{message}\".format(message=message) } }] def raise_200(answer): return ['200 OK', answer] @frappe.whitelist()", "answer = [] # lookup for plz city_results = frappe.db.sql(\"\"\"", "results') else: # 404 Not Found return raise_4xx(404, 'Not Found',", "FROM `tabPincode` WHERE `pincode` = '{plz_city}' ORDER BY `city` ASC", "{} data['plz'] = city.plz data['ort'] = city.city data['gemeinde'] = city.municipality", "Bad Request (Missing Token) return raise_4xx(400, 'Bad Request', 'Token Required')", "ON `schlichtungsbehoerde`.`name`=`geminendentbl`.`parent` WHERE `geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if", "frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass def _get_sb(**kwargs): ''' call", "= '{plz_city}' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results)", "if len(city_results) < 1: # lookup for city city_results =", "`kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search) > 0: result", "plz_city = 
kwargs['plz_city'] except: # 400 Bad Request (Missing PLZ/City)", "'Bad Request', 'Token Required') # check that token is correct", "len(search) > 0: result = search[0] else: result = {}", "data['gemeinde'] = city.municipality data['bezirk'] = city.district data['kanton'] = city.canton data['allgemein']", "city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`, `canton` FROM `tabPincode`", "lookup for city city_results = frappe.db.sql(\"\"\" SELECT `city`, `municipality`, `district`,", "def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return def", "for schlichtungsbehoerde in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung',", "output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf = True, output", "0: return raise_200(answer) else: # 404 Not Found return raise_4xx(404,", "output = PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name` FROM `tabArbitration Authority`\"\"\",", "`schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen`", "output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf =", "= [] # lookup for plz city_results = frappe.db.sql(\"\"\" SELECT", "is present try: token = kwargs['token'] except: # 400 Bad", "@frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long', job_name='Schlichtungsbehörden Sammel-PDF', **{'no_letterhead': no_letterhead}) return", "`pincode` = '{plz_city}' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if", "Unauthorized (Invalid Token) return raise_4xx(401, 
'Unauthorized', 'Invalid Token') # check", "= datetime.now() ts = \"{0:04d}-{1:02d}-{2:02d}\".format(now.year, now.month, now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF',", "unicode_literals import frappe from frappe.model.document import Document from datetime import", "frappe.model.document import Document from datetime import datetime from PyPDF2 import", "AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine` AS `Kündigungstermine`, `schlichtungsbehoerde`.`pauschalen` AS `Pauschalen`, `schlichtungsbehoerde`.`rechtsberatung` AS", "'Fragebogen für Schlichtungsbehörden', as_pdf = True, output = output, no_letterhead", "Found return raise_4xx(404, 'Not Found', 'No results') else: # 404", "(Missing PLZ/City) return raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer =", "= frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Fragebogen für Schlichtungsbehörden', as_pdf = True,", "'{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0: return raise_200(answer)", "return def _get_sammel_pdf(no_letterhead=1): output = PdfFileWriter() schlichtungsbehoerden = frappe.db.sql(\"\"\"SELECT `name`", "`schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT JOIN", "is present try: plz_city = kwargs['plz_city'] except: # 400 Bad", "= city.canton data['allgemein'] = get_informations(city.canton) data['schlichtungsbehoerde'] = frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel`", "present try: plz_city = kwargs['plz_city'] except: # 400 Bad Request", "token is present try: token = kwargs['token'] except: # 400", "frappe from frappe.model.document import Document from datetime import datetime from", "'Unauthorized', 'Invalid Token') # check that plz_city is present try:", "import PdfFileWriter from frappe.utils.file_manager import save_file class ArbitrationAuthority(Document): pass def", "WHERE `pincode` = '{plz_city}' 
ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True)", "`geminendentbl`.`municipality` = '{municipality}' \"\"\".format(municipality=city.municipality), as_dict=True) answer.append(data) if len(answer) > 0:", "now.day) file_name = \"{0}_{1}.pdf\".format('SB_Sammel-PDF', ts) save_file(file_name, pdf, '', '', is_private=1)", "from datetime import datetime from PyPDF2 import PdfFileWriter from frappe.utils.file_manager", "Required') # check that token is correct if not token", "API', 'token'): # 401 Unauthorized (Invalid Token) return raise_4xx(401, 'Unauthorized',", "FROM `tabKantonsinformationen` WHERE `kanton` = '{kanton}' \"\"\".format(kanton=kanton), as_dict=True) if len(search)", "WHERE `city` LIKE '%{plz_city}%' ORDER BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True)", "raise_200(answer): return ['200 OK', answer] @frappe.whitelist() def get_sammel_pdf(no_letterhead=1): frappe.enqueue(method=_get_sammel_pdf, queue='long',", "= frappe.db.sql(\"\"\" SELECT `schlichtungsbehoerde`.`titel` AS `Titel`, `schlichtungsbehoerde`.`telefon` AS `Telefon`, `schlichtungsbehoerde`.`kuendigungstermine`", "raise_4xx(400, 'Bad Request', 'PLZ/City Required') answer = [] # lookup", "in schlichtungsbehoerden: output = frappe.get_print(\"Arbitration Authority\", schlichtungsbehoerde.name, 'Datenüberprüfung', as_pdf =", "BY `city` ASC \"\"\".format(plz_city=plz_city), as_dict=True) if len(city_results) > 0: for", "Eingaben`, `schlichtungsbehoerde`.`homepage` AS `Homepage` FROM `tabArbitration Authority` AS `schlichtungsbehoerde` LEFT" ]
[ "host: str, port: int) -> Connection: self.__socket.connect((host, port)) return Connection(self.__socket)", "import socket class ClientSocket: def __init__(self) -> None: self.__socket =", ".connection import Connection import socket class ClientSocket: def __init__(self) ->", "def connect(self, host: str, port: int) -> Connection: self.__socket.connect((host, port))", "__init__(self) -> None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host:", "from .connection import Connection import socket class ClientSocket: def __init__(self)", "class ClientSocket: def __init__(self) -> None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host: str, port:", "self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host: str, port: int)", "ClientSocket: def __init__(self) -> None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host: str, port: int) -> Connection:", "import Connection import socket class ClientSocket: def __init__(self) -> None:", "Connection import socket class ClientSocket: def __init__(self) -> None: self.__socket", "connect(self, host: str, port: int) -> Connection: self.__socket.connect((host, port)) return", "socket class ClientSocket: def __init__(self) -> None: self.__socket = socket.socket(socket.AF_INET,", "socket.SOCK_STREAM) def connect(self, host: str, port: int) -> Connection: self.__socket.connect((host,", "-> None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host: str,", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self, host: str, port: int) ->", "def __init__(self) -> None: self.__socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM) def connect(self," ]
[ "Apache License for the specific # language governing permissions and", "Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema", "in range(numDataSets): pointsData = allPoints[i] widthsData = allWidths[i] expectedExtent =", "self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a", "] pointsSolutions = [ [(1, 1, 0), (1, 1, 0)],", "width ] curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve", "self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'],", "at Default should return lh, not rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) #", "= Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates", "1. 
We can define a prim of its type #", "radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent", "with 2 widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve", "Usd.Stage.CreateInMemory() # Xformable Tests identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0,", "for the specific # language governing permissions and limitations under", "sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()]) def", "a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\")", "camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(),", "# Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests", "mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring demotes to", "# Attribute name sanity check. We expect the names returned", "of a well known schema. 
However, it's not # yet", "Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList)", "in range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] # Compute", "1, 0)], # Zero-Volume Extent Test [(-1, -1, -1), (1,", "of the Licensor # and its affiliates, except as required", "[(-1.5, -1, -4), (3, 3, 5)] # Complex Test, Many", "actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent,", "(3,0,0)], # Test Curve with 1 width [(0,0,0), (1,1,1), (2,1,1),", "= Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName,", "has no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring", "# Compute extent via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory()", "self.assertEqual(allMetadata['custom'], False) # Author a custom property spec. layer =", "= [ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with", "various # scenarios # Number 1: Sparse and non-sparse authoring", "tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) #", "self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting... xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr)", "# Simple Width Test [(-2, -2, -2), (3, 3, 3)],", "the content of the NOTICE file. 
# # You may", "from pxr import Usd, Tf s = Usd.Stage.CreateInMemory() spherePrim =", "\"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a, b", "= s.DefinePrim('/sphere', typeName='Sphere') # set with list of tuples vec", "True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves", "True) # Check querying of fallback values. sphere = UsdGeom.Sphere.Define(stage,", "schemas show up for t in types: self.assertTrue(prim.HasAPI(t)) # Check", "Check that we get an exception for unknown and non-API", "include 'allowedTokens', # provided by the property definition. visibility =", "Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "Test, Many Points/Widths [(3, -1, 5), (-1.5, 0, 3), (1,", "# Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not", "= UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube", "self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from", "expectedExtent = pointBasedSolutions[i] # Compute extent via generic UsdGeom.Boundable API.", "UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for", "generic UsdGeom.Boundable API s = 
Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\")", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh", "and non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False,", "to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that all our", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test UsdGeomCurves curvesPoints", "False) # Author a custom property spec. layer = s.GetRootLayer()", "pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent is", "NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema", "Make sure uniform access behaves as expected. # ori =", "pxr import Usd, Tf s = Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere',", "curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before", "# Test Curve with 1 width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)],", "not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema = UsdGeom.Camera.Define(stage,", "Multiple Width Test # Erroneous Widths/Points Test -> Returns None", "# Number 1: Sparse and non-sparse authoring on def'd prim", "the Apache License and the following modification to it: #", "[1], # Test Curve with 1 width [.5, .1], #", "1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray", "schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "License. 
# pylint: disable=map-builtin-not-iterating import sys, unittest from pxr import", "set with list of tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0],", "UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if", "pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI", "# Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is", "fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting...", "# a prim's typename. s = Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere',", "no width ] curvesWidths = [ [1], # Test Curve", "# XXX: This is awful, it'd be nice to not", "is in fact # authored at the current edit target.", "Capsule Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim()", "not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema = UsdGeom.Scope.Define(stage,", "= s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius',", "XXX: This is awful, it'd be nice to not do", "its own compute extent function, so # it should fall", "1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(", "UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "numDataSets = len(curvesPoints) for i in range(numDataSets): pointsData = curvesPoints[i]", 
"'/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom())", "Extent Test [(0, 0, 0), (0, 0, 0)], # Simple", "Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh))", "UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData)", "Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should win.", "# BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is", "UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic')", "Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent')", "mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should have been authored at Usd.TimeCode.Default,", "attribute of a well known schema. 
However, it's not #", "= sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s", "10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1,", "change for Bug111239, but now tests that this # fix", "Test Curve with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve", "XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) #", "Curve with 2 widths (MAX) [(0,0,0), (3,1,1)], # Test Curve", "= UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective')", "i in range(numDataSets): pointsData = allPoints[i] widthsData = allWidths[i] expectedExtent", "# Apply our schemas to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) #", "self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring demotes", "C++ typename be used as # a prim's typename. s", "Tf.Type.FindByName('UsdTyped')] # Our sphere prim should return true on IsA", "width ] # Perform the actual v. 
expected comparison numDataSets", "visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert that", "1e-5)) def test_TypeUsage(self): # Perform Type-Ness Checking for ComputeExtent pointsAsList", "list of tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1],", "should work. mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s", "its affiliates, except as required to comply with Section 4(c)", "from pxr import Usd, Tf s = Usd.Stage.CreateInMemory() prim =", "Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a", "self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh", "radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a", "scope) # Make a mesh at a different path, should", "or agreed to in writing, software # distributed under the", "[a.GetName() for a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr import", "Test [2, 4], # Multiple Width Test [2, 4, 5],", "Curve with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with", "pxr import Usd, Tf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim')", "s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), 
Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'),", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self): from pxr import", "2 widths [] # Test Curve with no width ]", "= Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp)", "transforms the scope into a # mesh, since Define() always", "plain prim should return false # for all of them.", "sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition", "Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema)", "that we get exceptions but don't crash when applying to", "to floats from # num.float32s due to the way Gf.Vec3f", "NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim()", "[(0, 0, 0), (0, 0, 0)], # Simple Width Test", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is", "Tests schema = UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no", "negative numbers ids = [8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds", "None: for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch", "Usd, Tf s = Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim", "stage = Usd.Stage.Open(l.identifier) # For every prim schema 
type in", "'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False)", "be nice to not do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])),", "(3,1,1)], # Test Curve with no width ] # Perform", "self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema = UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim", "Try to make a mesh at subscope's path. This transforms", "-1), (1, 1, 1)], [(-1.5, -1, -4), (3, 3, 5)]", "actualExtent = comp(pointsAsList) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "of extremeExtentArr to floats from # num.float32s due to the", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder", "v. expected comparison numDataSets = len(curvesPoints) for i in range(numDataSets):", "def test_Fallbacks(self): # Author Scene and Compose Stage stage =", "not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema = UsdGeom.Cone.Define(stage,", "1, 0), (1, 1, 0)], # Zero-Volume Extent Test [(0,", "Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere prim should return true", "test_Concrete(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable =", "1)], # Multiple Width Test [(-1, -1, -1), (1, 1,", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test UsdGeomCurves curvesPoints = [", "\"rightHanded\" was set at t=10, so reading *any* time should", "actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent,", "[(1, 1, 0)], # Zero-Volume Extent Test [(0, 0, 0)],", "= UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of fallback", 
"not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema = UsdGeom.Cube.Define(stage,", "self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim", "schema type in this module, validate that: # 1. We", "is a registered attribute of a well known schema. However,", "t=10, so reading *any* time should # return \"rightHanded\" self.assertEqual(ori.Get(9.9),", "return false # for all of them. for t in", "sanity check it still evals the same mesh2 = UsdGeom.Mesh.Define(stage,", "schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test that id's roundtrip", "type in this module, validate that: # 1. We can", "and reverting... xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal =", "yet authored at the current edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))", "# Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not", "None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\")", "the Apache License. # pylint: disable=map-builtin-not-iterating import sys, unittest from", "Compute extent via generic UsdGeom.Boundable API. # UsdGeom.Mesh does not", "= len(allPoints) for i in range(numDataSets): pointsData = allPoints[i] expectedExtent", "Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim = schema.GetPrim()", "(1, 1, 1)], # Simple Width Test [(-2, -2, -2),", "the current edit target. 
self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a", "[1, 2, 2, 1] # Complex Test, Many Points/Widths ]", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "# Perform Type-Ness Checking for ComputeExtent pointsAsList = [(0, 0,", "# null prim. with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def", "Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test that", "self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "n == \"primvars:displayOpacity\": n = \"displayOpacity\" name = n[0].upper() +", "Width Test [(-2, -2, -2), (3, 3, 3)], # Multiple", "mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): # apiName overrides if n", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr())", "UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for a, b", "have its own compute extent function, so # it should", "via the generic API. 
# self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False))", "spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert that attribute fallback", "2, 1] # Complex Test, Many Points/Widths ] pointBasedSolutions =", "Usd.TimeCode.Default, so reading the # attribute at Default should return", "the following modification; you may not use this file except", "for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(),", "xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(),", "prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that all our applied schemas", "builtin properties is available and defined # BasisCurves Tests schema", "s = Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName", "License\") # with the following modification; you may not use", "Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with Gf vecs vec =", "on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) #", "p.GetAttribute(\"orientation\") # The generic orientation attribute should be automatically defined", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # 
Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is", "+ \"Attr() not found in: \" + str(dir(mesh)))) def test_IsA(self):", "self.assertTrue(scope) # Assert that a simple find or create gives", "Test Curve with no width ] curvesWidths = [ [1],", "# Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is", "Compute extent via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() nurbsCurvesPrim", "value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting... xformOpOrderAttr", "2017 Pixar # # Licensed under the Apache License, Version", "+ name + \"Attr\") in dir(mesh), (\"Get\" + name +", "pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] # Compute extent via", "= s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no", "is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema = UsdGeom.Mesh.Define(stage,", "s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root)", "= UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "be automatically defined because # it is a registered attribute", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh", "== \"primvars:displayOpacity\": n = \"displayOpacity\" name = n[0].upper() + n[1:]", "b, 1e-5)) def test_Bug116593(self): from pxr import Gf s =", "but now tests that this # fix has been reverted.", "= s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), 
Tf.Type.FindByName('UsdTyped')]", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone", "= \"displayOpacity\" name = n[0].upper() + n[1:] self.assertTrue((\"Get\" + name", "= UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # #", "NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim = schema.GetPrim()", "import Gf # Create some simple test cases allPoints =", "null prim. with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self):", "UsdGeom.MotionAPI.Apply(prim) # Check that all our applied schemas show up", "Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable", "= [ [0], # Zero-Volume Extent Test [2], # Simple", "curvesPoints[i] widths = curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData,", "\"/Camera\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not", "Usd.Stage.Open(l.identifier) # For every prim schema type in this module,", "def test_ComputeExtent(self): from pxr import Gf # Create some simple", "expectedExtent = curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b", "self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\")", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points Test for", "vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) 
self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set", "the License and to reproduce the content of the NOTICE", "dense for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue())", "a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\")", "1, 0)], # Zero-Volume Extent Test [(0, 0, 0), (0,", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera", "prim of its type # 2. Its type and inheritance", "self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go, test that CreateXXXAttr performs", "defined because # it is a registered attribute of a", "Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage", "self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with Gf vecs vec = [Gf.Vec3f(1,2,2),", "the above modification is # distributed on an \"AS IS\"", "reverting... 
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"]", "(1, 1, 1)], # Erroneous Widths/Points Test # Complex Test,", "Zero-Volume Extent Test [(-1, -1, -1), (1, 1, 1)], #", "import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI =", "self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello')", "a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema = UsdGeom.Scope.Define(stage, \"/Scope\")", "# apiName overrides if n == \"primvars:displayColor\": n = \"displayColor\"", "basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a, b in", "the specific # language governing permissions and limitations under the", "camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def", "Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder", "by the property definition. visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in", "current edit target. ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\"", "all of them. 
for t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def", "# Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not", "a prim of its type # 2. Its type and", "[Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no APIs have yet been", "10.0) def test_Points(self): stage = Usd.Stage.CreateInMemory() # Points Tests schema", "with 2 widths [] # Test Curve with no width", "# Multiple Width Test # Erroneous Widths/Points Test -> Returns", "so reading the # attribute at Default should return lh,", "APIs have yet been applied for t in types: self.assertFalse(prim.HasAPI(t))", "False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a defined", "Test # Erroneous Widths/Points Test -> Returns None None, [(-2.5,", "schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "Usd, UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l", "(2, 2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent", "# Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim =", "Erroneous Widths/Points Test -> Ok For Point-Based [(-1, -1, -1),", "Gf.Vec3f is wrapped out # XXX: This is awful, it'd", "You may obtain a copy of the Apache License at", "schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor hops", "'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, 
declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should", "Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types =", "edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "None, [(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex Test,", "# Perform the actual v. expected comparison numDataSets = len(curvesPoints)", "expectedExtent = pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in", "s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no APIs", "# Points Test for i in range(numDataSets): pointsData = allPoints[i]", "to map the contents of extremeExtentArr to floats from #", "Section 6. Trademarks. 
is deleted and replaced with: # #", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "(0, 0, 0)], # Simple Width Test [(-1, -1, -1),", "# Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test", "Test [(0, 0, 0)], # Simple Width Test [(-1, -1,", "def test_IsA(self): # Author Scene and Compose Stage l =", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute extent via generic UsdGeom.Boundable", "[] # Test Curve with no width ] curvesSolutions =", "Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube", "self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go, test that", "that: # 1. We can define a prim of its", "opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(),", "the Apache License with the above modification is # distributed", "Width Test [(-1, -1, -1), (1, 1, 1)], # Multiple", "Test [(-1, -1, -1), (1, 1, 1)], # Erroneous Widths/Points", "to comply with Section 4(c) of # the License and", "expectedExtent is not None: for a, b in zip(expectedExtent, actualExtent):", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable", "with Gf vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))", "writing, software # distributed under the Apache License with the", "# Scope is not 
a Cylinder # Scope has no", "Test for i in range(numDataSets): pointsData = allPoints[i] widthsData =", "= [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with", "-2), (2, 2, -4)], ] allWidths = [ [0], #", "= Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd')", "self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a defined mesh, and sanity", "now is in fact # authored at the current edit", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder", "UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go,", "Gf vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1],", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self): #", "# Section 6. Trademarks. 
is deleted and replaced with: #", "returned by the schema # to match the names returned", "marks, or product names of the Licensor # and its", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "0, 0), (0, 0, 0)], # Simple Width Test [(-1,", "it should fall back to the extent for PointBased prims.", "4, 5], # Erroneous Widths/Points Test [1, 2, 2, 1]", "# UsdGeom.Mesh does not have its own compute extent function,", "Test [(-1, -1, -1), (1, 1, 1)], # Simple Width", "self.assertFalse(prim.HasAPI(t)) # Apply our schemas to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim)", "is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema =", "1: Sparse and non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True)", "# The value \"rightHanded\" was set at t=10, so reading", "# Test that id's roundtrip properly, for big numbers, and", "unknown and non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform'))", "[(-1, -1, -1), (1, 1, 1)], # Simple Width Test", "with no width ] # Perform the actual v. expected", "always authors typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope)", "= radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author", "self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0)", "Complex Test, Many Points/Widths ] pointsSolutions = [ [(1, 1,", "a Cylinder # Scope has no builtins! 
# Sphere Tests", "# Licensed under the Apache License, Version 2.0 (the \"Apache", "# Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is", "# Test Curve with no width ] curvesSolutions = [", "2, 3, 4), (8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)", "= UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform", "UsdGeom.Boundable API. # UsdGeom.Mesh does not have its own compute", "License with the above modification is # distributed on an", "Xform Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim = schema.GetPrim()", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a", "# xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) #", "def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent) #", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self): # Perform Type-Ness Checking", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self):", "# 3. At least one of its builtin properties is", "yet been applied for t in types: self.assertFalse(prim.HasAPI(t)) # Apply", "Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a", "can define a prim of its type # 2. Its", "validate that: # 1. We can define a prim of", "25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5))", "the scope back. 
self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make", "# Multiple Width Test # Erroneous Widths/Points Test -> Ok", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self): from pxr", "[2, 4], # Multiple Width Test [2, 4, 5], #", "# # Unless required by applicable law or agreed to", "[\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue())", "[(0,0,0), (3,1,1)], # Test Curve with no width ] #", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr()) #", "not None: for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "at a different path, should work. mesh = UsdGeom.Mesh.Define(s, '/parent/mesh')", "since Define() always authors typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh)", "self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5))", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "def test_Camera(self): from pxr import Gf stage = Usd.Stage.CreateInMemory() camera", "a mesh at subscope's path. 
This transforms the scope into", "\"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) #", "self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh =", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") #", "= pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in zip(expectedExtent,", "non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False)", "Try authoring and reverting... 
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None)", "under the Apache License, Version 2.0 (the \"Apache License\") #", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable", "Compose Stage l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) # For", "[(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with Gf", "BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a", "= UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(),", "# Complex Test, Many Points/Widths ] pointBasedSolutions = [ [(1,", "self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform access behaves as", "Test [(-1, -1, -1), (1, 1, 1)], # Multiple Width", "camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage = Usd.Stage.CreateInMemory() # Points", "test_IsA(self): # Author Scene and Compose Stage l = Sdf.Layer.CreateAnonymous()", "class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier)", "self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with Gf vecs vec", "# Check that no APIs have yet been applied for", "# Scope Tests schema = 
UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim =", "allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a, b", "(3, 3, 3)], # Multiple Width Test # Erroneous Widths/Points", "in mesh.GetSchemaAttributeNames(): # apiName overrides if n == \"primvars:displayColor\": n", "# Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema", "(1,1,1), (2,1,1), (3,0,0)] # Test Curve with no width ]", "s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius')", "returned for builtin attributes. do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is", "types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr import Usd, Tf", "# Ensure duplicates aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas())", "Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage =", "the current edit target. ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) #", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) #", "0, 0), (1, 1, 1), (2, 2, 2)] pointsAsVec3fArr =", "Width Test # Erroneous Widths/Points Test -> Ok For Point-Based", "] # Perform the actual v. expected comparison numDataSets =", "import Gf stage = Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable))", "not a Cylinder # Scope has no builtins! 
# Sphere", "test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\",", "# Create some simple test cases allPoints = [ [(1,", "+ n[1:] self.assertTrue((\"Get\" + name + \"Attr\") in dir(mesh), (\"Get\"", "# ori = p.GetAttribute(\"orientation\") # The generic orientation attribute should", "# self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames():", "= Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent =", "Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s", "camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp =", "test_ComputeExtent(self): from pxr import Gf # Create some simple test", "should include 'allowedTokens', # provided by the property definition. 
visibility", "# Multiple Width Test [(-1, -1, -1), (1, 1, 1)],", "tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere))", "Unless required by applicable law or agreed to in writing,", "sys, unittest from pxr import Sdf, Usd, UsdGeom, Vt, Gf,", "Extent Test [2], # Simple Width Test [2, 4], #", "7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp)", "automatically defined because # it is a registered attribute of", "with no width ] curvesWidths = [ [1], # Test", "express or implied. See the Apache License for the specific", "so # it should fall back to the extent for", "orientation attribute should be automatically defined because # it is", "b, 1e-5)) # Compute extent via generic UsdGeom.Boundable API s", "# Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is", "[ [(1, 1, 0)], # Zero-Volume Extent Test [(0, 0,", "Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a", "Test for i in range(numDataSets): pointsData = allPoints[i] expectedExtent =", "because # it is a registered attribute of a well", "= curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b in", "widths [] # Test Curve with no width ] curvesSolutions", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh", "fall back to the extent for PointBased prims. s =", "# and everything it inherits from. 
Our plain prim should", "nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "= curvesPoints[i] widths = curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent =", "prims emptyPoints = [] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need", "identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0) xform =", "self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh", "Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "governing permissions and limitations under the Apache License. # pylint:", "nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b in", "via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s,", "Test, Many Points/Widths ] # Perform the correctness tests for", "s = Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular')", "to not do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1])))", "= curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for", "radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)", "disable=map-builtin-not-iterating import 
sys, unittest from pxr import Sdf, Usd, UsdGeom,", "compliance with the Apache License and the following modification to", "Extent Test [(0, 0, 0)], # Simple Width Test [(-1,", "1e-5)) # Points Test for i in range(numDataSets): pointsData =", "range(numDataSets): pointsData = curvesPoints[i] widths = curvesWidths[i] expectedExtent = curvesSolutions[i]", "# Complex Test, Many Points/Widths [(3, -1, 5), (-1.5, 0,", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points Test for i", "UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l =", "\"displayColor\" elif n == \"primvars:displayOpacity\": n = \"displayOpacity\" name =", "range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData)", "now tests that this # fix has been reverted. We", "Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(", "prims. 
s = Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is", "a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\")", "spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'),", "it'd be nice to not do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float,", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points", "UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue())", "everything it inherits from. Our plain prim should return false", "Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema", "has been reverted. 
We no longer allow the C++ typename", "is awful, it'd be nice to not do it extremeExtentRange", "Check that no APIs have yet been applied for t", "schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh", "= s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere =", "UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates aren't picked up UsdGeom.ModelAPI.Apply(root)", "[0], # Zero-Volume Extent Test [2], # Simple Width Test", "# Sphere Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim =", "Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a", "# compliance with the Apache License and the following modification", "list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): # This used to test", "is not None: for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "(1, 1, 1)], # Multiple Width Test [(-1, -1, -1),", "This License does not grant permission to use the trade", "Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass", "Test Curve with 1 width [.5, .1], # Test Curve", "= Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8,", "prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): #", "pxr import Gf # Create some simple test cases allPoints", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # 
Points is a Xformable", "usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for a", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable", "function, so # it should fall back to the extent", "authors typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) #", "# Before we go, test that CreateXXXAttr performs as we", "in: \" + str(dir(mesh)))) def test_IsA(self): # Author Scene and", "25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0,", "no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and", "layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom)", "Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene and Compose Stage", "Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh", "self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor", "'visibility' attribute -- should include 'allowedTokens', # provided by the", "[(-1, -1, -1), (1, 1, 1)], # Multiple Width Test", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test UsdGeomCurves curvesPoints = [ [(0,0,0),", "s.DefinePrim('/sphere', typeName='Sphere') # set with list of tuples vec =", "a defined mesh, and sanity check it still evals the", "applying to the # null prim. 
with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is", "is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema =", "self.assertTrue(schema) # Test that id's roundtrip properly, for big numbers,", "ancestor hops # PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\")", "# Assert that attribute fallback values are returned for builtin", "= allPoints[i] expectedExtent = pointBasedSolutions[i] # Compute extent via generic", "b, 1e-5)) # Test UsdGeomCurves curvesPoints = [ [(0,0,0), (1,1,1),", "Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema)", "language governing permissions and limitations under the Apache License. #", "authoring demotes to dense for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh'))", "Widths/Points Test -> Returns None None, [(-2.5, -1.5, -4.5), (3.5,", "= Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope.", "typename. s = Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName =", "0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage = Usd.Stage.CreateInMemory() #", "have been authored at Usd.TimeCode.Default, so reading the # attribute", "= radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List", "-1, -1), (1, 1, 1)], # Multiple Width Test [(-1,", "This is awful, it'd be nice to not do it", "at the current edit target. 
ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10)", "self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name", "Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr import Tf", "self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): # apiName overrides if", "Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602", "prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True,", "self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene and Compose Stage stage", "Author a value, and check that it's still defined, and", "test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent) # Make", "a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\")", "check. 
We expect the names returned by the schema #", "s = Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent", "# PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex)", "Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a", "camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(),", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder", "xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal))", "is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema =", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) #", "width ] curvesWidths = [ [1], # Test Curve with", "[(-1, -1, -1), (1, 1, 1)], [(-1.5, -1, -4), (3,", "different path, should work. 
mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def", "# Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests", "types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no APIs have", "= [ [1], # Test Curve with 1 width [.5,", "= Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable))", "# Author Scene and Compose Stage stage = Usd.Stage.CreateInMemory() #", "self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent)", "not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage,", "self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should win. self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying)", "and defined # BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema)", "camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(),", "schema. However, it's not # yet authored at the current", "in various # scenarios # Number 1: Sparse and non-sparse", "# names, trademarks, service marks, or product names of the", "self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2,", "inherits from. 
Our plain prim should return false # for", "Curve with 1 width [.5, .1], # Test Curve with", "and to reproduce the content of the NOTICE file. #", "1e-5)) def test_Bug116593(self): from pxr import Gf s = Usd.Stage.CreateInMemory()", "Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema)", "0), (0, 0, 0)], # Simple Width Test [(-1, -1,", "Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\")", "width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths (MAX)", "5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp)", "= [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self):", "s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope. 
scope = UsdGeom.Scope.Define(s, '/parent/subscope')", "prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test with a", "make it a defined mesh, and sanity check it still", "test_Points(self): stage = Usd.Stage.CreateInMemory() # Points Tests schema = UsdGeom.Points.Define(stage,", "and Points # Test for empty points prims emptyPoints =", "self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert that attribute fallback values are", "= Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage,", "Author Scene and Compose Stage l = Sdf.Layer.CreateAnonymous() stage =", "no width ] curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr())", "spec. layer = s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec(", "# BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not", "OF ANY # KIND, either express or implied. See the", "# Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a", "# # Make sure uniform access behaves as expected. #", "fix has been reverted. We no longer allow the C++", "is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema =", "Test Curve with no width ] curvesSolutions = [ [(-.5,-.5,-.5),", "sure uniform access behaves as expected. # ori = p.GetAttribute(\"orientation\")", "Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "it inherits from. 
Our plain prim should return false #", "License does not grant permission to use the trade #", "Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5))", "BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either", "n == \"primvars:displayColor\": n = \"displayColor\" elif n == \"primvars:displayOpacity\":", "def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number", "# Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is", "\"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY #", "import Usd, Tf s = Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere')", "test_IsATypeless(self): from pxr import Usd, Tf s = Usd.Stage.CreateInMemory() spherePrim", "= UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent):", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a", "= layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True)", "check that it's still defined, and now is in fact", "= UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule", "Sdf, Usd, UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self):", "for PointBased and Points # Test for empty points prims", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable", "ori = 
p.GetAttribute(\"orientation\") # The generic orientation attribute should be", "self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr import Usd, Tf s", "root.GetAppliedSchemas()) # Ensure duplicates aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'],", "# Multiple Width Test [2, 4, 5], # Erroneous Widths/Points", "PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(),", "Points/Widths ] # Perform the correctness tests for PointBased and", "UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates aren't", "expectedExtent = comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for a, b in", "authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue())", "actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b in zip(expectedExtent, actualExtent):", "# Xform Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim =", "id's roundtrip properly, for big numbers, and negative numbers ids", "use this file except in # compliance with the Apache", "xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback value", "5.5)] # Complex Test, Many Points/Widths ] # Perform the", "50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) 
camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10))", "Licensed under the Apache License, Version 2.0 (the \"Apache License\")", "1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths", "pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent", "s = Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent =", "= Usd.Stage.CreateInMemory() # Xformable Tests identity = Gf.Matrix4d(1) origin =", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths)", "b, 1e-5)) # Mesh Test for i in range(numDataSets): pointsData", "Default should return lh, not rh. 
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The", "UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go, test that CreateXXXAttr", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform", "properties is available and defined # BasisCurves Tests schema =", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder", "Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "Pixar # # Licensed under the Apache License, Version 2.0", "in types: self.assertTrue(prim.HasAPI(t)) # Check that we get an exception", "Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "with Section 4(c) of # the License and to reproduce", "typeName='Sphere') # set with list of tuples vec = [(1,2,2),(12,3,3)]", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points", "Usd.Stage.CreateInMemory() # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) #", "6. Trademarks. 
is deleted and replaced with: # # 6.", "= UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere", "return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded)", "trade # names, trademarks, service marks, or product names of", "Gf.Vec3f(12,3,3)) # set with Gf vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)]", "typelessPrim = s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'),", "= stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "= Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr import", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder", "reverted. 
We no longer allow the C++ typename be used", "= UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is not None and expectedExtent", "\"/Points\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not", "= s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'),", "\"/Xform\") # direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder", "declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should win. self.assertTrue(not radius.IsCustom())", "Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure", "self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test with a non-applied API", "actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent,", "API s = Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths)", "variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should win. self.assertTrue(not", "# Scope has no builtins! # Sphere Tests schema =", "for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) #", "UsdGeom.Tokens.rightHanded) # # Attribute name sanity check. We expect the", "allow the C++ typename be used as # a prim's", "Sphere # and everything it inherits from. 
Our plain prim", "allPoints = [ [(1, 1, 0)], # Zero-Volume Extent Test", "[(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex Test, Many", "= UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope", "2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000))", "# Simple Width Test [2, 4], # Multiple Width Test", "(MAX) [(0,0,0), (3,1,1)], # Test Curve with no width ]", "API. # UsdGeom.Mesh does not have its own compute extent", "overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) #", "= s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for a in", "camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(),", "self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic')", "roundtrip properly, for big numbers, and negative numbers ids =", "= Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) 
self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self):", "# NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "0)], # Zero-Volume Extent Test [(0, 0, 0), (0, 0,", "0)], # Zero-Volume Extent Test [(0, 0, 0)], # Simple", "License and the following modification to it: # Section 6.", "Type-Ness Checking for ComputeExtent pointsAsList = [(0, 0, 0), (1,", "for i in range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i]", "at subscope's path. This transforms the scope into a #", "= UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor hops # PointBased and", "create gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) #", "required by applicable law or agreed to in writing, software", "b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh Test", "1), (2, 2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent", "= Usd.Stage.Open(l.identifier) # For every prim schema type in this", "PointBased prims. s = Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData)", "self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self):", "# distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR", "False) # List fields on 'visibility' attribute -- should include", "permissions and limitations under the Apache License. 
# pylint: disable=map-builtin-not-iterating", "a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "[a.GetName() for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "-4.5), (3.5, 4, 5.5)] # Complex Test, Many Points/Widths ]", "name + \"Attr\") in dir(mesh), (\"Get\" + name + \"Attr()", "should win. self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'],", "the scope into a # mesh, since Define() always authors", "compute extent function, so # it should fall back to", "\" + str(dir(mesh)))) def test_IsA(self): # Author Scene and Compose", "Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh", "4(c) of # the License and to reproduce the content", "-1), (1, 1, 1)], # Multiple Width Test [(-1, -1,", "= Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] #", "up for t in types: self.assertTrue(prim.HasAPI(t)) # Check that we", "widths (MAX) [(0,0,0), (3,1,1)], # Test Curve with no width", "Scene and Compose Stage stage = Usd.Stage.CreateInMemory() # Xformable Tests", "10) # \"leftHanded\" should have been authored at Usd.TimeCode.Default, so", "= spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def test_Camera(self): from pxr", "Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere') # set with list of", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is 
not a", "overrides if n == \"primvars:displayColor\": n = \"displayColor\" elif n", "geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr", "= Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p)", "= UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch", "self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName()", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh", "[ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1", "= n[0].upper() + n[1:] self.assertTrue((\"Get\" + name + \"Attr\") in", "and its affiliates, except as required to comply with Section", "subscope. 
scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that a", "-1.5, -4.5), (3.5, 4, 5.5)] # Complex Test, Many Points/Widths", "schemas to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that all", "self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr import Usd,", "PointBased Test numDataSets = len(allPoints) for i in range(numDataSets): pointsData", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder", "with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr", "= [ [(1, 1, 0)], # Zero-Volume Extent Test [(0,", "authored at Usd.TimeCode.Default, so reading the # attribute at Default", "= Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p)", "Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author", "agreed to in writing, software # distributed under the Apache", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder", "to list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): # This used to", "expected. 
# ori = p.GetAttribute(\"orientation\") # The generic orientation attribute", "xform = UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass xformOpOrder = xform.GetXformOpOrderAttr()", "scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that a simple", "self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): # This used to test a", "the # attribute at Default should return lh, not rh.", "UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "been authored at Usd.TimeCode.Default, so reading the # attribute at", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube", "1)], # Erroneous Widths/Points Test # Complex Test, Many Points/Widths", "generic UsdGeom.Boundable API. # UsdGeom.Mesh does not have its own", "for Sphere # and everything it inherits from. Our plain", "UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) #", "1e-5)) # Test UsdGeomCurves curvesPoints = [ [(0,0,0), (1,1,1), (2,1,1),", "self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root =", "types: self.assertFalse(prim.HasAPI(t)) # Apply our schemas to this prim UsdGeom.ModelAPI.Apply(prim)", "Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere prim should return true on", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute", "pylint: disable=map-builtin-not-iterating import 
sys, unittest from pxr import Sdf, Usd,", "not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema = UsdGeom.Cylinder.Define(stage,", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name sanity", "def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p =", "Simple Width Test [(-1, -1, -1), (1, 1, 1)], #", "inheritance matches our expectations # 3. At least one of", "that we get an exception for unknown and non-API types", "\"Apache License\") # with the following modification; you may not", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim", "affiliates, except as required to comply with Section 4(c) of", "Ensure duplicates aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) #", "# PointBased Test numDataSets = len(allPoints) for i in range(numDataSets):", "schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema)", "Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2, 3,", "= [ [(1, 1, 0), (1, 1, 0)], # Zero-Volume", "3)], # Multiple Width Test # Erroneous 
Widths/Points Test ->", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "See the Apache License for the specific # language governing", "= allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a,", "We need to map the contents of extremeExtentArr to floats", "pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim,", "For every prim schema type in this module, validate that:", "# language governing permissions and limitations under the Apache License.", "should # return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded)", "# set with list of tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))", "0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 *", "self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform", "# to match the names returned via the generic API.", "file except in # compliance with the Apache License and", "= [8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) #", "self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor hops #", "5), (-1.5, 0, 3), (1, 3, -2), (2, 2, -4)],", "mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) 
mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse", "for a, b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) #", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh", "the Licensor # and its affiliates, except as required to", "= Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(", "known schema. However, it's not # yet authored at the", "Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema", "License for the specific # language governing permissions and limitations", "matches our expectations # 3. At least one of its", "nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b", "scope into a # mesh, since Define() always authors typeName.", "(3.5, 4, 5.5)] # Complex Test, Many Points/Widths ] #", "law or agreed to in writing, software # distributed under", "self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform access", "n = \"displayColor\" elif n == \"primvars:displayOpacity\": n = \"displayOpacity\"", "# Zero-Volume Extent Test [(0, 0, 0), (0, 0, 0)],", "Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "# NurbsCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is", "ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and check that it's still", "# Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): #", "generic 
orientation attribute should be automatically defined because # it", "= p.GetAttribute(\"orientation\") # The generic orientation attribute should be automatically", "# Test Curve with 2 widths [] # Test Curve", "sphere prim should return true on IsA queries for Sphere", "\"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(),", "3. At least one of its builtin properties is available", "least one of its builtin properties is available and defined", "should be automatically defined because # it is a registered", "the Apache License for the specific # language governing permissions", "[8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) # convert", "on 'visibility' attribute -- should include 'allowedTokens', # provided by", "not have its own compute extent function, so # it", "# it is a registered attribute of a well known", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a", "[(0, 0, 0), (1, 1, 1), (2, 2, 2)] pointsAsVec3fArr", "width [.5, .1], # Test Curve with 2 widths []", "\"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a,", "-1), (1, 1, 1)], # Simple Width Test [(-2, -2,", "authored at the current edit target. 
ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded,", "same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying", "10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2, 3, 4),", "back to the extent for PointBased prims. s = Usd.Stage.CreateInMemory()", "radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability,", "-> Returns None None, [(-2.5, -1.5, -4.5), (3.5, 4, 5.5)]", "Before we go, test that CreateXXXAttr performs as we expect", "Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema = UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema)", "# Check that we get an exception for unknown and", "camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(),", "a mesh at a different path, should work. 
mesh =", "i in range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent", "with 1 width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve", "tests for PointBased and Points # Test for empty points", "1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp", "[ [(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent", "Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets =", "not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene and", "Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a", "0) xform = UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass xformOpOrder =", "root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())", "# BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim =", "Version 2.0 (the \"Apache License\") # with the following modification;", "'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that we get exceptions but don't", "self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make a mesh at subscope's", "spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability'))", "\"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') 
camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(),", "Test -> Ok For Point-Based [(-1, -1, -1), (1, 1,", "# # Licensed under the Apache License, Version 2.0 (the", "set at t=10, so reading *any* time should # return", "for ComputeExtent pointsAsList = [(0, 0, 0), (1, 1, 1),", "6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(),", "self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata =", "mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) #", "UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not", "a different path, should work. mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh)", "work. mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s =", "layer = s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec,", "This used to test a change for Bug111239, but now", "Make a subscope. 
scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert", "UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is", "no longer allow the C++ typename be used as #", "typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make", "== \"primvars:displayColor\": n = \"displayColor\" elif n == \"primvars:displayOpacity\": n", "self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets = len(allPoints) for i in", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) #", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a", "[Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from", "Many Points/Widths ] pointsSolutions = [ [(1, 1, 0), (1,", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves", "under the Apache License with the above modification is #", "3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(),", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "show up for t in types: self.assertTrue(prim.HasAPI(t)) # Check that", "schema = UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "not a Mesh 
self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder))", "not None: for a, b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b,", "crash when applying to the # null prim. with self.assertRaises(Tf.ErrorException):", "= Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is", "Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI))", "(3,0,0)], # Test Curve with 2 widths [(0,0,0), (1,1,1), (2,1,1),", "emptyPoints = [] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to", "Checking for ComputeExtent pointsAsList = [(0, 0, 0), (1, 1,", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable", "# num.float32s due to the way Gf.Vec3f is wrapped out", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone", "# Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is", "Zero-Volume Extent Test [(0, 0, 0)], # Simple Width Test", "Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema)", "type # 2. Its type and inheritance matches our expectations", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "# Try to make a mesh at subscope's path. 
This", "# \"leftHanded\" should have been authored at Usd.TimeCode.Default, so reading", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) #", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube", "# Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests", "attribute fallback values are returned for builtin attributes. do =", "4], # Multiple Width Test [2, 4, 5], # Erroneous", "generic API. # self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n", "= pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is not", "resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list self.assertEqual(ids, resolvedIds)", "exceptions but don't crash when applying to the # null", "Compute extent via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() pointsPrim", "actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent is not None", "Trademarks. This License does not grant permission to use the", "= UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for a,", "at t=10, so reading *any* time should # return \"rightHanded\"", "[ [0], # Zero-Volume Extent Test [2], # Simple Width", "grant permission to use the trade # names, trademarks, service", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable))", "values are returned for builtin attributes. 
do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined())", "basisCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "prim's typename. s = Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName", "as expected. # ori = p.GetAttribute(\"orientation\") # The generic orientation", "sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery =", "-42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list", "extent via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() nurbsCurvesPrim =", "is not a Cylinder # Scope has no builtins! #", "# Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests", "# Test Curve with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test", "is not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema =", "a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for a in", "is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 *", "None: for a, b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "self.assertRaises(Tf.ErrorException): # Test with a non-applied API schema. 
prim.HasAPI(Tf.Type.FindByName('UsdModelAPI')) if", "-1, -1), (1, 1, 1)], # Multiple Width Test #", "mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of", "UsdGeom.Xform.Define(stage, \"/Xform\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is", "# with the following modification; you may not use this", "Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a", "test_Fallbacks(self): # Author Scene and Compose Stage stage = Usd.Stage.CreateInMemory()", "with the above modification is # distributed on an \"AS", "'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for", "our applied schemas show up for t in types: self.assertTrue(prim.HasAPI(t))", "At least one of its builtin properties is available and", "to use the trade # names, trademarks, service marks, or", "Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins(", "Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema)", "definition. 
visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert", "# Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not", "] curvesWidths = [ [1], # Test Curve with 1", "radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(),", "= UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent):", "nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim,", "Test Curve with 1 width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], #", "a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\")", "Extent Test [(-1, -1, -1), (1, 1, 1)], # Simple", "def test_Typed(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable", "Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) #", "Zero-Volume Extent Test [2], # Simple Width Test [2, 4],", "with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test with a non-applied", "following modification to it: # Section 6. Trademarks. 
is deleted", "Curve with no width ] curvesWidths = [ [1], #", "Test, Many Points/Widths ] pointBasedSolutions = [ [(1, 1, 0),", "b, 1e-5)) # Points Test for i in range(numDataSets): pointsData", "this file except in # compliance with the Apache License", "Cylinder # Scope has no builtins! # Sphere Tests schema", "# Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(),", "i in range(numDataSets): pointsData = curvesPoints[i] widths = curvesWidths[i] expectedExtent", "self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(),", "= UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius)", "*any* time should # return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded)", "with no width ] curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], #", "Tf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'),", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self): # Perform Type-Ness", "imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def", "Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not", "numDataSets = 
len(allPoints) for i in range(numDataSets): pointsData = allPoints[i]", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope", "should return true on IsA queries for Sphere # and", "= UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone", "due to the way Gf.Vec3f is wrapped out # XXX:", "num.float32s due to the way Gf.Vec3f is wrapped out #", "UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is", "= Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom'))", "\"primvars:displayColor\": n = \"displayColor\" elif n == \"primvars:displayOpacity\": n =", "self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def test_Camera(self): from pxr import Gf", "Curve with 2 widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test", "Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere prim should return", "the # null prim. with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim()))", "a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr())", "attribute -- should include 'allowedTokens', # provided by the property", "Check querying of fallback values. 
sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius", "if actualExtent is not None and expectedExtent is not None:", "[(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with no width", "# Zero-Volume Extent Test [(-1, -1, -1), (1, 1, 1)],", "not # yet authored at the current edit target. self.assertTrue(ori.IsDefined())", "either express or implied. See the Apache License for the", "Apply our schemas to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check", "path. This transforms the scope into a # mesh, since", "but don't crash when applying to the # null prim.", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute extent", "a non-applied API schema. prim.HasAPI(Tf.Type.FindByName('UsdModelAPI')) if __name__ == \"__main__\": unittest.main()", "1, 1)], # Multiple Width Test # Erroneous Widths/Points Test", "Sparse and non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue())", "a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene and Compose", "= UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that a simple find", "2, -4)], ] allWidths = [ [0], # Zero-Volume Extent", "comparison numDataSets = len(curvesPoints) for i in range(numDataSets): pointsData =", "our schemas to this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that", "longer allow the C++ typename be used as # a", "# Definition should win. self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata =", "queries for Sphere # and everything it inherits from. 
Our", "Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "no APIs have yet been applied for t in types:", "self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr())", "ids = [8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get())", "allWidths = [ [0], # Zero-Volume Extent Test [2], #", "applied schemas show up for t in types: self.assertTrue(prim.HasAPI(t)) #", "self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0)", "it still evals the same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(),", "test that CreateXXXAttr performs as we expect in various #", "for builtin attributes. do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None)", "subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback", "self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should have been authored", "self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a custom property spec.", "Gf # Create some simple test cases allPoints = [", "the actual v. 
expected comparison numDataSets = len(curvesPoints) for i", "for t in types: self.assertTrue(prim.HasAPI(t)) # Check that we get", "from pxr import Gf # Create some simple test cases", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh", "self.assertTrue(do.Get() is None) def test_Camera(self): from pxr import Gf stage", "UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is", "The value \"rightHanded\" was set at t=10, so reading *any*", "4), (8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp =", "-1, -4), (3, 3, 5)] # Complex Test, Many Points/Widths", "allPoints[i] widthsData = allWidths[i] expectedExtent = pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData,", "that this # fix has been reverted. We no longer", "IsA queries for Sphere # and everything it inherits from.", "\"/Cube\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not", "# NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests", "from # num.float32s due to the way Gf.Vec3f is wrapped", "to the extent for PointBased prims. 
s = Usd.Stage.CreateInMemory() meshPrim", "We can define a prim of its type # 2.", "self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr import", "1, 0), (1, 1, 0)], # Zero-Volume Extent Test [(-1,", "via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s,", "1, 0)], # Zero-Volume Extent Test [(0, 0, 0)], #", "pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase):", "# Test for empty points prims emptyPoints = [] extremeExtentArr", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves", "self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7,", "-4), (3, 3, 5)] # Complex Test, Many Points/Widths ]", "the same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "2 widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with", "# pylint: disable=map-builtin-not-iterating import sys, unittest from pxr import Sdf,", "of # the License and to reproduce the content of", "usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr import Gf # Create some", "Cone Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim = schema.GetPrim()", "and now is in fact # authored at the current", "this prim UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that all our applied", "and expectedExtent is not None: for a, b in zip(expectedExtent,", "BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim = 
schema.GetPrim()", "self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh", "have yet been applied for t in types: self.assertFalse(prim.HasAPI(t)) #", "apiName overrides if n == \"primvars:displayColor\": n = \"displayColor\" elif", "we get an exception for unknown and non-API types with", "# Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not", "value \"rightHanded\" was set at t=10, so reading *any* time", "PointBased and Points # Test for empty points prims emptyPoints", "# BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests", "(2,1,1), (3,0,0)], # Test Curve with 2 widths [(0,0,0), (1,1,1),", "so reading *any* time should # return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded)", "implied. See the Apache License for the specific # language", "curvesPoints = [ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve", "in # compliance with the Apache License and the following", "self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim", "Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check", "rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value \"rightHanded\" was set at", "NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema", "-- should include 'allowedTokens', # provided by the property definition.", "Apache License. # pylint: disable=map-builtin-not-iterating import sys, unittest from pxr", "evals the same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) #", "# Make a subscope. 
scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) #", "modification; you may not use this file except in #", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5,", "[(-2, -2, -2), (3, 3, 3)], # Multiple Width Test", "Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage, \"/Xform\")", "required to comply with Section 4(c) of # the License", "UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for", "with 2 widths (MAX) [(0,0,0), (3,1,1)], # Test Curve with", "def test_TypeUsage(self): # Perform Type-Ness Checking for ComputeExtent pointsAsList =", "# Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not", "(1, 1, 1)], # Multiple Width Test # Erroneous Widths/Points", "been reverted. 
We no longer allow the C++ typename be", "self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and check that", "test_Revert_Bug111239(self): # This used to test a change for Bug111239,", "self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0)", "a, b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh", "# Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) #", "\"/BasisCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not", "b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent =", "= UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined())", "None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue())", "# Unless required by applicable law or agreed to in", "a simple find or create gives us the scope back.", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr())", "Assert that a simple find or create gives us the", "defined, and now is in fact 
# authored at the", "# fix has been reverted. We no longer allow the", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a", "self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI',", "IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND,", "custom property spec. layer = s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec", "Sphere Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim()", "Tf s = Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim =", "6. Trademarks. This License does not grant permission to use", "import sys, unittest from pxr import Sdf, Usd, UsdGeom, Vt,", "a # mesh, since Define() always authors typeName. mesh =", "# Camera Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim =", "it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased", "for empty points prims emptyPoints = [] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints)", "i in range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] #", "pointBasedSolutions = [ [(1, 1, 0), (1, 1, 0)], #", "Cube Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim = schema.GetPrim()", "actualExtent is not None and expectedExtent is not None: for", "an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY", "self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting... 
xformOpOrderAttr =", "is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self): # Author Scene", "it's still defined, and now is in fact # authored", "pointsSolutions = [ [(1, 1, 0), (1, 1, 0)], #", "wrapped out # XXX: This is awful, it'd be nice", "content of the NOTICE file. # # You may obtain", "We expect the names returned by the schema # to", "# This used to test a change for Bug111239, but", "Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test that id's", "(1, 3, -2), (2, 2, -4)], ] allWidths = [", "and replaced with: # # 6. Trademarks. This License does", "the Apache License, Version 2.0 (the \"Apache License\") # with", "and check that it's still defined, and now is in", "# scenarios # Number 1: Sparse and non-sparse authoring on", "None and expectedExtent is not None: for a, b in", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a", "uniform access behaves as expected. 
# ori = p.GetAttribute(\"orientation\") #", "is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema =", "1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0)", "not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder", "self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope", "def test_Revert_Bug111239(self): # This used to test a change for", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable", "# The generic orientation attribute should be automatically defined because", "typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName')", "n = \"displayOpacity\" name = n[0].upper() + n[1:] self.assertTrue((\"Get\" +", "prim should return false # for all of them. for", "Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema =", "UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor hops # PointBased and Curves", "pointsData = curvesPoints[i] widths = curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent", "n in mesh.GetSchemaAttributeNames(): # apiName overrides if n == \"primvars:displayColor\":", "value, and check that it's still defined, and now is", "self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value \"rightHanded\" was set at t=10,", "self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in", "# KIND, either express or implied. See the Apache License", "and the following modification to it: # Section 6. 
Trademarks.", "UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr()) #", "Test # Erroneous Widths/Points Test -> Ok For Point-Based [(-1,", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr import Gf #", "from pxr import Gf stage = Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage,", "has no builtins! # Sphere Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\")", "distributed under the Apache License with the above modification is", "NOTICE file. # # You may obtain a copy of", "distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is", "to match the names returned via the generic API. #", "Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema", "\"leftHanded\" should have been authored at Usd.TimeCode.Default, so reading the", "True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a defined mesh, and", "stage = Usd.Stage.CreateInMemory() # Xformable Tests identity = Gf.Matrix4d(1) origin", "Xformable Tests identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0)", "do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) #", "and inheritance matches our expectations # 3. At least one", "target. 
self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and check", "Apache License, Version 2.0 (the \"Apache License\") # with the", "3, -2), (2, 2, -4)], ] allWidths = [ [0],", "#!/pxrpythonsubst # # Copyright 2017 Pixar # # Licensed under", "self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim", "mesh at a different path, should work. mesh = UsdGeom.Mesh.Define(s,", "self.assertTrue((\"Get\" + name + \"Attr\") in dir(mesh), (\"Get\" + name", "= UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius'", "self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None)", "self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self):", "\"Attr\") in dir(mesh), (\"Get\" + name + \"Attr() not found", "awful, it'd be nice to not do it extremeExtentRange =", "self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr import Usd, Tf s =", "cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(),", "for a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr import Gf", "\"/NurbsPatch\") 
self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not", "Cylinder self.assertTrue(schema.GetAxisAttr()) # Cube Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema)", "= UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test that id's roundtrip properly,", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh", "Bug111239, but now tests that this # fix has been", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is", "Test with a non-applied API schema. prim.HasAPI(Tf.Type.FindByName('UsdModelAPI')) if __name__ ==", "t in types: self.assertFalse(prim.HasAPI(t)) # Apply our schemas to this", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is", "as we expect in various # scenarios # Number 1:", "[] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to map the", "# mesh, since Define() always authors typeName. mesh = UsdGeom.Mesh.Define(s,", "the NOTICE file. # # You may obtain a copy", "returned via the generic API. 
# self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True),", "scenarios # Number 1: Sparse and non-sparse authoring on def'd", "# Assert that a simple find or create gives us", "-2, -2), (3, 3, 3)], # Multiple Width Test #", "with the following modification; you may not use this file", "ComputeExtent pointsAsList = [(0, 0, 0), (1, 1, 1), (2,", "+ name + \"Attr() not found in: \" + str(dir(mesh))))", "pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is not None", "\"primvars:displayOpacity\": n = \"displayOpacity\" name = n[0].upper() + n[1:] self.assertTrue((\"Get\"", "extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets = len(allPoints)", "it a defined mesh, and sanity check it still evals", "# Author a value, and check that it's still defined,", "stage = Usd.Stage.CreateInMemory() # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\")", "been applied for t in types: self.assertFalse(prim.HasAPI(t)) # Apply our", "Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema", "UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for", "Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim,", "them. 
for t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from", "p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim())", ".1], # Test Curve with 2 widths [] # Test", "Test [2, 4, 5], # Erroneous Widths/Points Test [1, 2,", "] allWidths = [ [0], # Zero-Volume Extent Test [2],", "Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere')", "a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) #", "# Mesh Test for i in range(numDataSets): pointsData = allPoints[i]", "own compute extent function, so # it should fall back", "Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI))", "of tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3))", "fallback values. 
sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue())", "available and defined # BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\")", "not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema = UsdGeom.Points.Define(stage,", "in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr import Gf # Create", "def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius", "it: # Section 6. Trademarks. is deleted and replaced with:", "= pointBasedSolutions[i] # Compute extent via generic UsdGeom.Boundable API. #", "scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make a", "origin = Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage, \"/Xform\") #", "= allWidths[i] expectedExtent = pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if", "Point-Based [(-1, -1, -1), (1, 1, 1)], [(-1.5, -1, -4),", "# 6. Trademarks. This License does not grant permission to", "ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should have been", "overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) overMesh.CreateDoubleSidedAttr(True, True)", "are returned for builtin attributes. 
do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get()", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "# Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform is", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh Test for i in range(numDataSets):", "NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a", "not found in: \" + str(dir(mesh)))) def test_IsA(self): # Author", "3, 4), (8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp", "1)], [(-1.5, -1, -4), (3, 3, 5)] # Complex Test,", "nice to not do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float,", "in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh Test for", "Erroneous Widths/Points Test # Complex Test, Many Points/Widths [(3, -1,", "= UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points", "5)] # Complex Test, Many Points/Widths ] pointsSolutions = [", "\"/Points\") self.assertTrue(schema) # Test that id's roundtrip properly, for big", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "except as required to comply with Section 4(c) of #", "UsdGeomCurves curvesPoints = [ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test", "the schema # to match the names returned via the", "a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\")", "file. 
# # You may obtain a copy of the", "prim should return true on IsA queries for Sphere #", "\"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a", "# Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests", "a value, and check that it's still defined, and now", "self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List fields on 'visibility' attribute", "stage = Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera", "self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius'", "Multiple Width Test [(-1, -1, -1), (1, 1, 1)], #", "from. Our plain prim should return false # for all", "may not use this file except in # compliance with", "copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self): # Perform", "Author a custom property spec. 
layer = s.GetRootLayer() sphereSpec =", "str(dir(mesh)))) def test_IsA(self): # Author Scene and Compose Stage l", "= UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder", "UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute", "extremeExtentArr to floats from # num.float32s due to the way", "t in types: self.assertTrue(prim.HasAPI(t)) # Check that we get an", "actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is not None and", "out # XXX: This is awful, it'd be nice to", "for all of them. for t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t))", "= UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make a mesh", "meshPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "cases allPoints = [ [(1, 1, 0)], # Zero-Volume Extent", "check it still evals the same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\")", "# Number 2: Sparse authoring demotes to dense for non-defed", "self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata()", "attribute at Default should return lh, not rh. 
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded)", "\"displayOpacity\" name = n[0].upper() + n[1:] self.assertTrue((\"Get\" + name +", "# # Copyright 2017 Pixar # # Licensed under the", "# Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is", "sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform,", "we get exceptions but don't crash when applying to the", "> 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): # apiName", "= UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere", "the contents of extremeExtentArr to floats from # num.float32s due", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder", "UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is not None and expectedExtent is", "not None and expectedExtent is not None: for a, b", "performs as we expect in various # scenarios # Number", "5], # Erroneous Widths/Points Test [1, 2, 2, 1] #", "self.assertTrue(mesh) self.assertTrue(not scope) # Make a mesh at a different", "Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a", "attributes. do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def test_Camera(self):", "(-1.5, 0, 3), (1, 3, -2), (2, 2, -4)], ]", "2. 
Its type and inheritance matches our expectations # 3.", "name = n[0].upper() + n[1:] self.assertTrue((\"Get\" + name + \"Attr\")", "Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a", "[(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2 widths (MAX) [(0,0,0),", "test a change for Bug111239, but now tests that this", "schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "Scope is not a Cylinder # Scope has no builtins!", "UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in", "self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0)", "not a Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage,", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test", "camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(),", "not grant permission to use the trade # names, trademarks,", "Perform Type-Ness Checking for ComputeExtent pointsAsList = [(0, 0, 0),", "0), (1, 1, 0)], # Zero-Volume Extent Test [(0, 0,", "to it: # Section 6. Trademarks. 
is deleted and replaced", "Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) # For every prim schema type", "Test [(0, 0, 0), (0, 0, 0)], # Simple Width", "in dir(mesh), (\"Get\" + name + \"Attr() not found in:", "# Xformable Tests identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0,", "Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr", "meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default())", "is wrapped out # XXX: This is awful, it'd be", "(\"Get\" + name + \"Attr() not found in: \" +", "the way Gf.Vec3f is wrapped out # XXX: This is", "(1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1 width [(0,0,0),", "elif n == \"primvars:displayOpacity\": n = \"displayOpacity\" name = n[0].upper()", "no width ] # Perform the actual v. 
expected comparison", "= Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent =", "you may not use this file except in # compliance", "duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr())", "true on IsA queries for Sphere # and everything it", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "\"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we go, test", "Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root", "types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable'))", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points Test for i in range(numDataSets):", "self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) 
self.assertEqual(allMetadata['custom'],", "convert VtArray to list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): # This", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas())", "of fallback values. sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr()", "resolvedIds) def test_Revert_Bug111239(self): # This used to test a change", "l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\")", "test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) #", "Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema)", "actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a,", "self.assertEqual(allMetadata['custom'], False) # List fields on 'visibility' attribute -- should", "return true on IsA queries for Sphere # and everything", "UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "with: # # 6. Trademarks. 
This License does not grant", "Test [(-2, -2, -2), (3, 3, 3)], # Multiple Width", "self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0)", "Complex Test, Many Points/Widths [(3, -1, 5), (-1.5, 0, 3),", "Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False)", "get an exception for unknown and non-API types with self.assertRaises(Tf.ErrorException):", "\"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def", "Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets = len(allPoints) for", "= Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) # For every prim schema", "its builtin properties is available and defined # BasisCurves Tests", "to dense for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True)", "API s = Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData)", "\"/Mesh\") # multiple ancestor hops # PointBased and Curves curves", "# return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11),", "is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema =", "points prims emptyPoints = [] extremeExtentArr = 
UsdGeom.PointBased.ComputeExtent(emptyPoints) # We", "on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF", "camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(),", "self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr import Tf xform =", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cone is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is", "Returns None None, [(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] #", "spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata", "Mesh Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim()", "= Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent =", "current edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value,", "Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests schema", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "us the scope back. 
self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to", "self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # #", "Points Test for i in range(numDataSets): pointsData = allPoints[i] widthsData", "Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous()", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a", "in visibility.GetAllMetadata()) # Assert that attribute fallback values are returned", "mesh at subscope's path. This transforms the scope into a", "len(allPoints) for i in range(numDataSets): pointsData = allPoints[i] expectedExtent =", "allWidths[i] expectedExtent = pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent", "curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1", "Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere prim should", "mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple ancestor hops # PointBased", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder #", "= UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData) nurbsCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default())", "UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False, True) self.assertTrue(overMesh.GetDoubleSidedAttr().HasAuthoredValue()) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), False) 
overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)", "for i in range(numDataSets): pointsData = curvesPoints[i] widths = curvesWidths[i]", "self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage = Usd.Stage.CreateInMemory()", "# convert VtArray to list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): #", "in [a.GetName() for a in usdGeomSphere.GetAttributes()]) def test_ComputeExtent(self): from pxr", "= \"displayColor\" elif n == \"primvars:displayOpacity\": n = \"displayOpacity\" name", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a", "[2, 4, 5], # Erroneous Widths/Points Test [1, 2, 2,", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute extent via generic", "self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure", "way Gf.Vec3f is wrapped out # XXX: This is awful,", "] curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with", "'/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s,", "import Gf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere') #", "product names of the Licensor # and its affiliates, except", "Test [1, 2, 2, 1] # Complex Test, Many Points/Widths", "self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr import", "Erroneous Widths/Points Test -> Returns None None, [(-2.5, -1.5, -4.5),", 
"pointBasedSolutions[i] # Compute extent via generic UsdGeom.Boundable API. # UsdGeom.Mesh", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "= [(0, 0, 0), (1, 1, 1), (2, 2, 2)]", "Curve with no width ] curvesSolutions = [ [(-.5,-.5,-.5), (3.5,1.5,1.5)],", "= comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for a, b in zip(expectedExtent,", "Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform access behaves as expected.", "self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr import Tf xform", "= UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves", "# List fields on 'visibility' attribute -- should include 'allowedTokens',", "scope.GetPrim()) # Try to make a mesh at subscope's path.", "-1), (1, 1, 1)], # Erroneous Widths/Points Test # Complex", "visibility.GetAllMetadata()) # Assert that attribute fallback values are returned for", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "target. 
ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should have", "s = Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Xform", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) #", "picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that we", "with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test", "typename be used as # a prim's typename. s =", "as # a prim's typename. s = Usd.Stage.CreateInMemory() sphere =", "the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "\"/Scope\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "this module, validate that: # 1. We can define a", "still defined, and now is in fact # authored at", "to the # null prim. 
with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException):", "one of its builtin properties is available and defined #", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s,", "above modification is # distributed on an \"AS IS\" BASIS,", "xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(), None)", "the property definition. visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata())", "self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make a mesh at", "'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0)", "trademarks, service marks, or product names of the Licensor #", "is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr())", "# Test Curve with no width ] curvesWidths = [", "property definition. 
visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) #", "duplicates aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify", "Points/Widths ] pointBasedSolutions = [ [(1, 1, 0), (1, 1,", "# Test UsdGeomCurves curvesPoints = [ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)],", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "# Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is", "'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a custom property", "(2,1,1), (3,0,0)], # Test Curve with 1 width [(0,0,0), (1,1,1),", "BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a", "for unknown and non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException):", "1, 1)], # Simple Width Test [(-2, -2, -2), (3,", "CreateXXXAttr performs as we expect in various # scenarios #", "properly, for big numbers, and negative numbers ids = [8589934592,", "and everything it inherits from. 
Our plain prim should return", "cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0)", "self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of fallback values. sphere =", "self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a Cylinder self.assertTrue(schema.GetAxisAttr()) # Mesh Tests", "2: Sparse authoring demotes to dense for non-defed prim overMesh", "-1, -1), (1, 1, 1)], # Simple Width Test [(-2,", "non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException):", "as required to comply with Section 4(c) of # the", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is", "ANY # KIND, either express or implied. 
See the Apache", "do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def test_Camera(self): from", "self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is", "basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim,", "width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr())", "go, test that CreateXXXAttr performs as we expect in various", "[(1, 1, 0), (1, 1, 0)], # Zero-Volume Extent Test", "1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2, 3, 4), (8,", "Many Points/Widths [(3, -1, 5), (-1.5, 0, 3), (1, 3,", "self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr import Usd, Tf s", "at Usd.TimeCode.Default, so reading the # attribute at Default should", "pxr import Gf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere')", "Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius = spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName'))", "self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a", "0.602 * 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0)", "pointsPrim, Usd.TimeCode.Default()) if actualExtent is not None and expectedExtent is", "is deleted and replaced with: # # 6. 
Trademarks. This", "def test_HasAPI(self): from pxr import Usd, Tf s = Usd.Stage.CreateInMemory()", "authoring and reverting... xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal", "# Verify that we get exceptions but don't crash when", "Points/Widths [(3, -1, 5), (-1.5, 0, 3), (1, 3, -2),", "numbers, and negative numbers ids = [8589934592, 1099511627776, 0, -42]", "NurbsCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a", "comp(pointsAsList) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder self.assertTrue(schema.GetAxisAttr()) #", "mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType())) # # Make sure uniform access behaves", "the names returned via the generic API. # self.assertTrue(len(mesh.GetSchemaAttributeNames()) >", "-1), (1, 1, 1)], # Multiple Width Test # Erroneous", "UsdGeom.Tokens.vertex) # Before we go, test that CreateXXXAttr performs as", "'allowedTokens', # provided by the property definition. visibility = spherePrim.GetAttribute('visibility')", "[ [1], # Test Curve with 1 width [.5, .1],", "Scope is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not", "= s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope. 
scope = UsdGeom.Scope.Define(s,", "typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere))", "(3, 3, 5)] # Complex Test, Many Points/Widths ] pointsSolutions", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def", "(the \"Apache License\") # with the following modification; you may", "it is a registered attribute of a well known schema.", "# Make sure uniform access behaves as expected. # ori", "its type # 2. Its type and inheritance matches our", "self.assertEqual(ori.Get(10.1), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name sanity check.", "License, Version 2.0 (the \"Apache License\") # with the following", "mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make a", "= allPoints[i] widthsData = allWidths[i] expectedExtent = pointsSolutions[i] actualExtent =", "in fact # authored at the current edit target. ori.Set(UsdGeom.Tokens.leftHanded)", "in types: self.assertFalse(prim.HasAPI(t)) # Apply our schemas to this prim", "Points # Test for empty points prims emptyPoints = []", "0), (1, 1, 0)], # Zero-Volume Extent Test [(-1, -1,", "self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) # Definition should win. 
self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(),", "up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that we get", "VtArray to list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self): # This used", "Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(),", "return lh, not rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value \"rightHanded\"", "Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "= [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None)", "Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not a", "vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def", "# 2. Its type and inheritance matches our expectations #", "reading the # attribute at Default should return lh, not", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points Test", "the correctness tests for PointBased and Points # Test for", "a well known schema. 
However, it's not # yet authored", "NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a", "UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) # Test that id's roundtrip properly, for", "# multiple ancestor hops # PointBased and Curves curves =", "the following modification to it: # Section 6. Trademarks. is", "win. self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double')", "false # for all of them. for t in types:", "a change for Bug111239, but now tests that this #", "* 25.4, 1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(),", "and negative numbers ids = [8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids)", "is not None and expectedExtent is not None: for a,", "\"/Xform\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not", "that id's roundtrip properly, for big numbers, and negative numbers", "if n == \"primvars:displayColor\": n = \"displayColor\" elif n ==", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "Usd, Tf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types =", "not do it extremeExtentRange = Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty())", "schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to list self.assertEqual(ids,", "= comp(pointsAsList) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "Width Test [(-1, -1, -1), (1, 
1, 1)], # Erroneous", "root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'],", "list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh Test for i in", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable", "= Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere') # set with list", "1] # Complex Test, Many Points/Widths ] pointBasedSolutions = [", "test cases allPoints = [ [(1, 1, 0)], # Zero-Volume", "(2, 2, -4)], ] allWidths = [ [0], # Zero-Volume", "Number 2: Sparse authoring demotes to dense for non-defed prim", "widthsData = allWidths[i] expectedExtent = pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData)", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder # Scope", "reading *any* time should # return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10),", "UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent is not None and expectedExtent", "self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'],", "Test that id's roundtrip properly, for big numbers, and negative", "# NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim =", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr())", "builtin attributes. 
do = spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def", "Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5)) self.assertEqual(camera.GetClippingPlanesAttr().Get(), Vt.Vec4fArray())", "from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\")", "1e-5)) # Compute extent via generic UsdGeom.Boundable API s =", "Test Curve with 2 widths [] # Test Curve with", "Points/Widths ] pointsSolutions = [ [(1, 1, 0), (1, 1,", "fields on 'visibility' attribute -- should include 'allowedTokens', # provided", "Many Points/Widths ] # Perform the correctness tests for PointBased", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self): from", "from pxr import Gf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere',", "is not a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a", "Test Curve with 2 widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)] #", "actual v. 
expected comparison numDataSets = len(curvesPoints) for i in", "from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf class", "b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test UsdGeomCurves", "that a simple find or create gives us the scope", "vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1))", "# Erroneous Widths/Points Test # Complex Test, Many Points/Widths [(3,", "exception for unknown and non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with", "multiple ancestor hops # PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage,", "= UsdGeom.Boundable.ComputeExtentFromPlugins( nurbsCurvesPrim, Usd.TimeCode.Default()) for a, b in zip(expectedExtent, actualExtent):", "comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for a, b in zip(expectedExtent, actualExtent):", "and Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex)", "1, 1), (2, 2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp =", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere", "UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is", "schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) #", "self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage, \"/Mesh\") # multiple", 
"unittest from pxr import Sdf, Usd, UsdGeom, Vt, Gf, Tf", "+ \"Attr\") in dir(mesh), (\"Get\" + name + \"Attr() not", "that no APIs have yet been applied for t in", "Copyright 2017 Pixar # # Licensed under the Apache License,", "[ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width [(-.25,-.25,-.25),", "UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is", "\"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cylinder is not", "Sparse authoring demotes to dense for non-defed prim overMesh =", "in writing, software # distributed under the Apache License with", "'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(),", "UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of fallback values.", "2 widths (MAX) [(0,0,0), (3,1,1)], # Test Curve with no", "Its type and inheritance matches our expectations # 3. At", "# set with Gf vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec))", "expect the names returned by the schema # to match", "should return lh, not rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value", "0, 0)], # Simple Width Test [(-1, -1, -1), (1,", "This transforms the scope into a # mesh, since Define()", "UsdGeom.ModelAPI.Apply(prim) UsdGeom.MotionAPI.Apply(prim) # Check that all our applied schemas show", "prim. 
with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim())) with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from", "names, trademarks, service marks, or product names of the Licensor", "3, 3)], # Multiple Width Test # Erroneous Widths/Points Test", "limitations under the Apache License. # pylint: disable=map-builtin-not-iterating import sys,", "# You may obtain a copy of the Apache License", "and Compose Stage stage = Usd.Stage.CreateInMemory() # Xformable Tests identity", "0, 3), (1, 3, -2), (2, 2, -4)], ] allWidths", "NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a", "# authored at the current edit target. ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget()))", "s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere') # set with", "defined mesh, and sanity check it still evals the same", "# Zero-Volume Extent Test [(0, 0, 0)], # Simple Width", "self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1,", "names of the Licensor # and its affiliates, except as", "schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "Cylinder self.assertTrue(schema.GetFaceVertexCountsAttr()) # NurbsCurves Tests schema = UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema)", "lh, not rh. 
self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value \"rightHanded\" was", "a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\")", "Usd.TimeCode.Default()) if actualExtent is not None and expectedExtent is not", "Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root)", "# the License and to reproduce the content of the", "Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema)", "# Cube Tests schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim =", "= [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no APIs have yet", "import Usd, Tf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types", "radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self):", "# and its affiliates, except as required to comply with", "for t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr", "specific # language governing permissions and limitations under the Apache", "# Make a mesh at a different path, should work.", "a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema = UsdGeom.Xform.Define(stage, \"/Xform\")", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable", "Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825 * 25.4,", "Multiple Width Test # Erroneous 
Widths/Points Test -> Ok For", "(3.25,1.25,1.25)], # Test Curve with 2 widths (MAX) [(0,0,0), (3,1,1)],", "Width Test # Erroneous Widths/Points Test -> Returns None None,", "typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'),", "self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim", "simple find or create gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope'))", "test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim() radius =", "simple test cases allPoints = [ [(1, 1, 0)], #", "# Check that all our applied schemas show up for", "was set at t=10, so reading *any* time should #", "Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Sphere is a", "UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that we get exceptions", "self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr import Usd, Tf s =", "Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a", "values. sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius = sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery", "UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make a mesh at", "is available and defined # BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage,", "under the Apache License. # pylint: disable=map-builtin-not-iterating import sys, unittest", "Scene and Compose Stage l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier)", "it's not # yet authored at the current edit target.", "of its type # 2. 
Its type and inheritance matches", "a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\")", "with a non-applied API schema. prim.HasAPI(Tf.Type.FindByName('UsdModelAPI')) if __name__ == \"__main__\":", "Tests schema = UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring demotes to dense", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute extent via", "hops # PointBased and Curves curves = UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(),", "cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6, 5)])", "2.0 (the \"Apache License\") # with the following modification; you", "Multiple Width Test [2, 4, 5], # Erroneous Widths/Points Test", "# Cone Tests schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim =", "registered attribute of a well known schema. However, it's not", "Sdf.VariabilityUniform) # Definition should win. 
self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata", "1 width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with", "self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim", "= schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec = Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double,", "\"Attr() not found in: \" + str(dir(mesh)))) def test_IsA(self): #", "# Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not", "UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is", "that attribute fallback values are returned for builtin attributes. do", "# Our sphere prim should return true on IsA queries", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that we get exceptions but", "WARRANTIES OR CONDITIONS OF ANY # KIND, either express or", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera", "for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim", "1, 1)], # Erroneous Widths/Points Test # Complex Test, Many", "edit target. 
ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined()) self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should", "self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'],", "mesh.GetSchemaAttributeNames(): # apiName overrides if n == \"primvars:displayColor\": n =", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self): from pxr import Gf s", "(8, 7, 6, 5)]) camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) cp = Vt.Vec4fArray()", "querying of fallback values. sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\") radius =", "[.5, .1], # Test Curve with 2 widths [] #", "gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try", "test_Bug116593(self): from pxr import Gf s = Usd.Stage.CreateInMemory() prim =", "fallback values are returned for builtin attributes. do = spherePrim.GetAttribute('primvars:displayOpacity')", "UsdGeom.NurbsPatch.Define(stage, \"/NurbsPatch\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope", "Attribute name sanity check. 
We expect the names returned by", "the names returned by the schema # to match the", "s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere = s.DefinePrim('/usdGeomSphere',", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Compute extent via generic UsdGeom.Boundable API", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData)", "found in: \" + str(dir(mesh)))) def test_IsA(self): # Author Scene", "1e-5)) # Mesh Test for i in range(numDataSets): pointsData =", "= UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "# Test Curve with 2 widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)]", "expectedExtent = pointsSolutions[i] actualExtent = UsdGeom.Points.ComputeExtent(pointsData, widthsData) if actualExtent is", "Gf s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/sphere', typeName='Sphere') # set", "KIND, either express or implied. 
See the Apache License for", "every prim schema type in this module, validate that: #", "# NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests", "'/parent/subscope') self.assertTrue(scope) # Assert that a simple find or create", "Widths/Points Test -> Ok For Point-Based [(-1, -1, -1), (1,", "= [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere", "schema # to match the names returned via the generic", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves", "0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0)", "= len(curvesPoints) for i in range(numDataSets): pointsData = curvesPoints[i] widths", "make a mesh at subscope's path. This transforms the scope", "range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] # Compute extent", "[(0, 0, 0)], # Simple Width Test [(-1, -1, -1),", "1)], # Simple Width Test [(-2, -2, -2), (3, 3,", "in [a.GetName() for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName()", "Make a mesh at a different path, should work. 
mesh", "def test_Concrete(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable", "Apache License with the above modification is # distributed on", "UsdGeom.Mesh does not have its own compute extent function, so", "prim = s.DefinePrim('/sphere', typeName='Sphere') # set with list of tuples", "\"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not", "prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2:", "self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert that attribute fallback values", "of the NOTICE file. # # You may obtain a", "\"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is not", "= UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera", "# Erroneous Widths/Points Test -> Ok For Point-Based [(-1, -1,", "(2,1,1), (3,0,0)] # Test Curve with no width ] curvesWidths", "not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule Tests schema = UsdGeom.Capsule.Define(stage,", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh", "Author Scene and Compose Stage stage = Usd.Stage.CreateInMemory() # Xformable", "Widths/Points Test # Complex Test, Many Points/Widths [(3, -1, 5),", "widths) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "Curve with no width ] # Perform the actual v.", "used as # a prim's typename. s = Usd.Stage.CreateInMemory() sphere", "a prim's typename. 
s = Usd.Stage.CreateInMemory() sphere = s.DefinePrim('/sphere', typeName='Sphere')", "0, -42] schema.CreateIdsAttr(ids) resolvedIds = list(schema.GetIdsAttr().Get()) # convert VtArray to", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "be used as # a prim's typename. s = Usd.Stage.CreateInMemory()", "0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): # apiName overrides", "t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr import", "of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "we go, test that CreateXXXAttr performs as we expect in", "allPoints[i] expectedExtent = pointBasedSolutions[i] # Compute extent via generic UsdGeom.Boundable", "self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue())", "xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try", "map the contents of extremeExtentArr to floats from # num.float32s", "provided by the property definition. 
visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens'", "self.assertTrue(not scope) # Make a mesh at a different path,", "spherePrim.GetAttribute('primvars:displayOpacity') self.assertTrue(do.IsDefined()) self.assertTrue(do.Get() is None) def test_Camera(self): from pxr import", "= UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default())", "We no longer allow the C++ typename be used as", "Complex Test, Many Points/Widths ] pointBasedSolutions = [ [(1, 1,", "that CreateXXXAttr performs as we expect in various # scenarios", "our expectations # 3. At least one of its builtin", "API. # self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in", "def test_IsATypeless(self): from pxr import Usd, Tf s = Usd.Stage.CreateInMemory()", "attribute should be automatically defined because # it is a", "Gf stage = Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) #", "imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def", "used to test a change for Bug111239, but now tests", "= UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent is not None and", "# For every prim schema type in this module, validate", "an exception for unknown and non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown)", "extent via generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() pointsPrim =", "is # 
distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES", "Camera Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim = schema.GetPrim()", "in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self): from pxr import Usd,", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "comp = UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr) actualExtent = comp(pointsAsList) for", "Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr import Tf xform", "Scope Tests schema = UsdGeom.Scope.Define(stage, \"/Scope\") self.assertTrue(schema) prim = schema.GetPrim()", "# provided by the property definition. visibility = spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined())", "self.assertTrue(schema.GetAxisAttr()) # Mesh Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim", "UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is", "that it's still defined, and now is in fact #", "cp) cp = Vt.Vec4fArray() camera.GetClippingPlanesAttr().Set(cp) self.assertEqual(camera.GetClippingPlanesAttr().Get(), cp) self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8)", "[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 1 width", "time should # return \"rightHanded\" self.assertEqual(ori.Get(9.9), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10), UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(10.1),", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim =", "is not a Cylinder self.assertTrue(schema.GetWidthsAttr()) # Scope Tests schema =", "back. 
self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim()) # Try to make a mesh", "# attribute at Default should return lh, not rh. self.assertEqual(ori.Get(),", "= [] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to map", "UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim =", "in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\")", "= list(schema.GetIdsAttr().Get()) # convert VtArray to list self.assertEqual(ids, resolvedIds) def", "self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s = Usd.Stage.CreateInMemory()", "extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets = len(allPoints) for i", "pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent is not", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr()) #", "non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False, True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False)", "self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([],", "b, 1e-5)) def test_TypeUsage(self): # Perform Type-Ness Checking for ComputeExtent", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points", "property spec. 
layer = s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere') radiusSpec =", "# Author Scene and Compose Stage l = Sdf.Layer.CreateAnonymous() stage", "= Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable))", "self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name sanity check. We expect", "self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author a value, and check that it's", "fact # authored at the current edit target. ori.Set(UsdGeom.Tokens.leftHanded) self.assertTrue(ori.IsDefined())", "Test numDataSets = len(allPoints) for i in range(numDataSets): pointsData =", "Apache License and the following modification to it: # Section", "self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim", "expect in various # scenarios # Number 1: Sparse and", "WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express", "to make a mesh at subscope's path. This transforms the", "to reproduce the content of the NOTICE file. # #", "2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent = comp(pointsAsVec3fArr)", "except in # compliance with the Apache License and the", "radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying)", "# Compute extent via generic UsdGeom.Boundable API. 
# UsdGeom.Mesh does", "* 25.4, 1e-5)) camera.GetHorizontalApertureAttr().Set(3.0) self.assertEqual(camera.GetHorizontalApertureAttr().Get(), 3.0) self.assertTrue(Gf.IsClose(camera.GetVerticalApertureAttr().Get(), 0.602 * 25.4,", "# Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is", "Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\")", "find or create gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'),", "for a in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for a", "extent function, so # it should fall back to the", "0, 0) xform = UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass xformOpOrder", "3), (1, 3, -2), (2, 2, -4)], ] allWidths =", "with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with", "\"/Cone\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not", "to the way Gf.Vec3f is wrapped out # XXX: This", "schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "# NurbsPatch is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is", "2, 2, 1] # Complex Test, Many Points/Widths ] pointBasedSolutions", "1, 1)], [(-1.5, -1, -4), (3, 3, 5)] # Complex", "extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to map the contents", "stage = Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh =", "-1, -1), (1, 1, 1)], # Erroneous Widths/Points Test #", "l = Sdf.Layer.CreateAnonymous() stage = 
Usd.Stage.Open(l.identifier) # For every prim", "# 1. We can define a prim of its type", "self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(1,1,1)) def test_Typed(self): from pxr import Tf", "Widths/Points Test [1, 2, 2, 1] # Complex Test, Many", "is None) def test_Camera(self): from pxr import Gf stage =", "self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5,", "for Bug111239, but now tests that this # fix has", "Test [2], # Simple Width Test [2, 4], # Multiple", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder", "following modification; you may not use this file except in", "'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List fields on 'visibility'", "pointsData = allPoints[i] widthsData = allWidths[i] expectedExtent = pointsSolutions[i] actualExtent", "2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp = UsdGeom.PointBased.ComputeExtent expectedExtent =", "s = Usd.Stage.CreateInMemory('AppliedSchemas.usd') root = s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check", "True) # make it a defined mesh, and sanity check", "the C++ typename be used as # a prim's typename.", "# Complex Test, Many Points/Widths ] # Perform the correctness", "the extent for PointBased prims. 
s = Usd.Stage.CreateInMemory() meshPrim =", "floats from # num.float32s due to the way Gf.Vec3f is", "self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1)) self.assertEqual(p.GetTypeName(), Usd.SchemaRegistry().GetSchemaTypeName(mesh._GetStaticTfType()))", "= UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default())", "Erroneous Widths/Points Test [1, 2, 2, 1] # Complex Test,", "Apache License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable", "no builtins! # Sphere Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema)", "# Cube is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cube is not", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Sphere is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "correctness tests for PointBased and Points # Test for empty", "[(3, -1, 5), (-1.5, 0, 3), (1, 3, -2), (2,", "self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'],", "widthsData) if actualExtent is not None and expectedExtent is not", "with the Apache License and the following modification to it:", "with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], # Test Curve with 2", "= Gf.Vec3f(0, 0, 0) xform = UsdGeom.Xform.Define(stage, \"/Xform\") # direct", "schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "root.GetAppliedSchemas()) # Verify that we get exceptions but don't crash", 
"deleted and replaced with: # # 6. Trademarks. This License", "# yet authored at the current edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "Cube is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Cube is a", "defined # BasisCurves Tests schema = UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim", "names returned by the schema # to match the names", "match the names returned via the generic API. # self.assertTrue(len(mesh.GetSchemaAttributeNames())", "subscope's path. This transforms the scope into a # mesh,", "in range(numDataSets): pointsData = curvesPoints[i] widths = curvesWidths[i] expectedExtent =", "of them. for t in types: self.assertTrue(spherePrim.IsA(t)) self.assertFalse(typelessPrim.IsA(t)) def test_HasAPI(self):", "UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that a simple find or", "with list of tuples vec = [(1,2,2),(12,3,3)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2))", "= UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "] # Perform the correctness tests for PointBased and Points", "= [ [(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width", "is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch Tests schema =", "UsdGeom.Mesh.Define(s, \"/Mesh\") meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a,", "is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a Cylinder", "test_TypeUsage(self): # Perform Type-Ness Checking for ComputeExtent pointsAsList = [(0,", "on IsA queries for Sphere # and everything it inherits", "expected comparison 
numDataSets = len(curvesPoints) for i in range(numDataSets): pointsData", "Compose Stage stage = Usd.Stage.CreateInMemory() # Xformable Tests identity =", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Sphere is not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform", "n[1:] self.assertTrue((\"Get\" + name + \"Attr\") in dir(mesh), (\"Get\" +", "by the schema # to match the names returned via", "tests that this # fix has been reverted. We no", "into a # mesh, since Define() always authors typeName. mesh", "(3,0,0)] # Test Curve with no width ] curvesWidths =", "allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) #", "def test_Points(self): stage = Usd.Stage.CreateInMemory() # Points Tests schema =", "# Cylinder is a Xformable self.assertTrue(prim.IsA(UsdGeom.Cylinder)) # Cylinder is a", "this # fix has been reverted. We no longer allow", "UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5))", "access behaves as expected. # ori = p.GetAttribute(\"orientation\") # The", "parent = s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope. scope =", "# for all of them. for t in types: self.assertTrue(spherePrim.IsA(t))", "the trade # names, trademarks, service marks, or product names", "behaves as expected. 
# ori = p.GetAttribute(\"orientation\") # The generic", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone", "zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Test UsdGeomCurves curvesPoints =", "1 width [.5, .1], # Test Curve with 2 widths", "is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a", "1e-5)) camera.GetVerticalApertureAttr().Set(2.0) self.assertEqual(camera.GetVerticalApertureAttr().Get(), 2.0) self.assertEqual(camera.GetFocalLengthAttr().Get(), 50.0) camera.GetFocalLengthAttr().Set(35.0) self.assertTrue(Gf.IsClose(camera.GetFocalLengthAttr().Get(), 35.0, 1e-5))", "with 1 width [.5, .1], # Test Curve with 2", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Xform is not a Cylinder self.assertTrue(schema.GetXformOpOrderAttr()) def test_Fallbacks(self):", "# # 6. Trademarks. This License does not grant permission", "s = Usd.Stage.CreateInMemory() prim = s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')]", "list(schema.GetIdsAttr().Get()) # convert VtArray to list self.assertEqual(ids, resolvedIds) def test_Revert_Bug111239(self):", "Simple Width Test [2, 4], # Multiple Width Test [2,", "all our applied schemas show up for t in types:", "when applying to the # null prim. with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.MotionAPI.Apply(Usd.Prim()))", "Definition should win. self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetVariability(), Sdf.VariabilityVarying) allMetadata = radius.GetAllMetadata()", "a copy of the Apache License at # # http://www.apache.org/licenses/LICENSE-2.0", "well known schema. 
However, it's not # yet authored at", "Assert that attribute fallback values are returned for builtin attributes.", "mesh, and sanity check it still evals the same mesh2", "# Erroneous Widths/Points Test [1, 2, 2, 1] # Complex", "xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform))", "geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self): s =", "# Capsule is not a Cylinder self.assertTrue(schema.GetAxisAttr()) # Cone Tests", "= Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry().IsConcrete(xform)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(imageable)) self.assertFalse(Usd.SchemaRegistry().IsConcrete(geomModelAPI)) def test_Apply(self):", "Licensor # and its affiliates, except as required to comply", "1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage =", "(1, 1, 0)], # Zero-Volume Extent Test [(-1, -1, -1),", "basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a, b", "stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh) self.assertTrue(mesh.GetPrim()) self.assertTrue(not mesh.GetPointsAttr().Get(1))", "for big numbers, and negative numbers ids = [8589934592, 1099511627776,", "Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) 
self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform) #", "(1, 1, 1)], [(-1.5, -1, -4), (3, 3, 5)] #", "pointsAsList = [(0, 0, 0), (1, 1, 1), (2, 2,", "demotes to dense for non-defed prim overMesh = UsdGeom.Mesh(stage.OverridePrim('/overMesh')) overMesh.CreateDoubleSidedAttr(False,", "Scope has no builtins! # Sphere Tests schema = UsdGeom.Sphere.Define(stage,", "sphere.GetRadiusAttr() self.assertTrue(radius.HasFallbackValue()) radiusQuery = Usd.AttributeQuery(radius) self.assertTrue(radiusQuery.HasFallbackValue()) def test_DefineSchema(self): s =", "is not None: for a, b in zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a,", "self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory() spherePrim = UsdGeom.Sphere.Define(s, '/sphere').GetPrim()", "curvesWidths = [ [1], # Test Curve with 1 width", "type and inheritance matches our expectations # 3. At least", "extent via generic UsdGeom.Boundable API. # UsdGeom.Mesh does not have", "Simple Width Test [(-2, -2, -2), (3, 3, 3)], #", "curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for a,", "is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "that all our applied schemas show up for t in", "permission to use the trade # names, trademarks, service marks,", "OR CONDITIONS OF ANY # KIND, either express or implied.", "# Test with a non-applied API schema. 
prim.HasAPI(Tf.Type.FindByName('UsdModelAPI')) if __name__", "0)], # Zero-Volume Extent Test [(-1, -1, -1), (1, 1,", "UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cube is", "should have been authored at Usd.TimeCode.Default, so reading the #", "# Perform the correctness tests for PointBased and Points #", "Test for empty points prims emptyPoints = [] extremeExtentArr =", "for t in types: self.assertFalse(prim.HasAPI(t)) # Apply our schemas to", "BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsPatch is not a Mesh", "Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch", "[Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our sphere prim", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr()) # Capsule", "# Erroneous Widths/Points Test -> Returns None None, [(-2.5, -1.5,", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr())", "Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "# Capsule Tests schema = UsdGeom.Capsule.Define(stage, \"/Capsule\") self.assertTrue(schema) prim =", "prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test with a non-applied API schema.", "a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self):", "Create some simple test cases allPoints = [ [(1, 1,", 
"xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable = Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform))", "define a prim of its type # 2. Its type", "= Usd.Stage.CreateInMemory() spherePrim = s.DefinePrim('/sphere', typeName='Sphere') typelessPrim = s.DefinePrim('/regular') types", "-> Ok For Point-Based [(-1, -1, -1), (1, 1, 1)],", "# distributed under the Apache License with the above modification", "sphere = s.DefinePrim('/sphere', typeName='Sphere') tfTypeName = UsdGeom.Sphere._GetStaticTfType().typeName self.assertEqual(tfTypeName, 'UsdGeomSphere') usdGeomSphere", "name + \"Attr() not found in: \" + str(dir(mesh)))) def", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Points is not a Cylinder self.assertTrue(schema.GetWidthsAttr())", "Mesh Test for i in range(numDataSets): pointsData = allPoints[i] expectedExtent", "names returned via the generic API. # self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0)", "we expect in various # scenarios # Number 1: Sparse", "types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] # Our", "3, 5)] # Complex Test, Many Points/Widths ] pointsSolutions =", "Test Curve with 2 widths (MAX) [(0,0,0), (3,1,1)], # Test", "may obtain a copy of the Apache License at #", "prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh is a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "1, 1)], # Multiple Width Test [(-1, -1, -1), (1,", "UsdGeom.BasisCurves.Define(stage, \"/BasisCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is", "a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) #", "The generic 
orientation attribute should be automatically defined because #", "[(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2 widths", "root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates aren't picked up", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "modification is # distributed on an \"AS IS\" BASIS, WITHOUT", "self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear()) self.assertFalse(xformOpOrderAttr.HasAuthoredValue()) self.assertEqual(xformOpOrderAttr.Get(), None) self.assertFalse(xformOpOrder.HasFallbackValue()) mesh = UsdGeom.Mesh.Define(stage,", "self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException): # Test with", "not rh. self.assertEqual(ori.Get(), UsdGeom.Tokens.leftHanded) # The value \"rightHanded\" was set", "still evals the same mesh2 = UsdGeom.Mesh.Define(stage, \"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True)", "set with Gf vecs vec = [Gf.Vec3f(1,2,2), Gf.Vec3f(1,1,1)] self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0],", "with self.assertRaises(Tf.ErrorException): self.assertFalse(UsdGeom.ModelAPI.Apply(Usd.Prim())) def test_IsATypeless(self): from pxr import Usd, Tf", "and non-API types with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with", "Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a custom property spec. 
layer", "generic UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\")", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Capsule", "# direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has", "# Test Curve with no width ] # Perform the", "CONDITIONS OF ANY # KIND, either express or implied. See", "Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests schema", "builtins! # Sphere Tests schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim", "obtain a copy of the Apache License at # #", "aren't picked up UsdGeom.ModelAPI.Apply(root) self.assertEqual(['MotionAPI', 'GeomModelAPI'], root.GetAppliedSchemas()) # Verify that", "UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to map the contents of extremeExtentArr", "or create gives us the scope back. self.assertTrue(s.OverridePrim('/parent/subscope')) self.assertEqual(s.OverridePrim('/parent/subscope'), scope.GetPrim())", "self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas()) # Ensure duplicates aren't picked", "Zero-Volume Extent Test [(0, 0, 0), (0, 0, 0)], #", "[(-.5,-.5,-.5), (3.5,1.5,1.5)], # Test Curve with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)],", "Test Curve with no width ] # Perform the actual", "UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves is", "dir(mesh), (\"Get\" + name + \"Attr() not found in: \"", "overMesh.CreateDoubleSidedAttr(True, True) self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # make it a defined mesh,", "Number 1: Sparse and non-sparse authoring on def'd prim mesh.CreateDoubleSidedAttr(False,", "s = Usd.Stage.CreateInMemory() parent = 
s.OverridePrim('/parent') self.assertTrue(parent) # Make a", "-4)], ] allWidths = [ [0], # Zero-Volume Extent Test", "numbers ids = [8589934592, 1099511627776, 0, -42] schema.CreateIdsAttr(ids) resolvedIds =", "via generic UsdGeom.Boundable API. # UsdGeom.Mesh does not have its", "not use this file except in # compliance with the", "[(-1, -1, -1), (1, 1, 1)], # Erroneous Widths/Points Test", "self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List fields on", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is", "List fields on 'visibility' attribute -- should include 'allowedTokens', #", "s.DefinePrim('/usdGeomSphere', typeName='tfTypeName') self.assertTrue(UsdGeom.Sphere(sphere)) self.assertTrue('radius' in [a.GetName() for a in sphere.GetAttributes()])", "empty points prims emptyPoints = [] extremeExtentArr = UsdGeom.PointBased.ComputeExtent(emptyPoints) #", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera", "(3.5,1.5,1.5)], # Test Curve with 1 width [(-.25,-.25,-.25), (3.25,1.25,1.25)], #", "# NurbsCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_TypeUsage(self): # Perform Type-Ness Checking for", "= UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim = schema.GetPrim() self.assertTrue(prim.IsA(UsdGeom.Mesh)) # Mesh", "# NurbsPatch is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsPatch is not", "to in writing, software # distributed under the Apache License", "types: self.assertTrue(prim.HasAPI(t)) # Check that we get an exception for", "in this module, validate that: # 1. 
We can define", "self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double')", "Perform the actual v. expected comparison numDataSets = len(curvesPoints) for", "len(curvesPoints) for i in range(numDataSets): pointsData = curvesPoints[i] widths =", "a Cylinder self.assertTrue(schema.GetUKnotsAttr()) # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\")", "the generic API. # self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for", "of its builtin properties is available and defined # BasisCurves", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Scope is not a Cylinder # Scope has", "widths [(0,0,0), (1,1,1), (2,1,1), (3,0,0)] # Test Curve with no", "-1, 5), (-1.5, 0, 3), (1, 3, -2), (2, 2,", "Vt.Vec4fArray()) cp = Vt.Vec4fArray([(1, 2, 3, 4), (8, 7, 6,", "by applicable law or agreed to in writing, software #", "need to map the contents of extremeExtentArr to floats from", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Camera is not a Mesh", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # BasisCurves is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves", "should return false # for all of them. 
for t", "radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List fields", "get exceptions but don't crash when applying to the #", "curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b in zip(expectedExtent,", "Curve with 2 widths [] # Test Curve with no", "and Compose Stage l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) #", "TestUsdGeomSchemata(unittest.TestCase): def test_Basic(self): l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) p", "sanity check. We expect the names returned by the schema", "with self.assertRaises(Tf.ErrorException): # Test with a non-applied API schema. prim.HasAPI(Tf.Type.FindByName('UsdModelAPI'))", "= spherePrim.GetAttribute('radius') self.assertTrue(radius.HasMetadata('custom')) self.assertTrue(radius.HasMetadata('typeName')) self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double')", "4, 5.5)] # Complex Test, Many Points/Widths ] # Perform", "self.assertEqual(camera.GetFStopAttr().Get(), 0.0) camera.GetFStopAttr().Set(2.8) self.assertTrue(Gf.IsClose(camera.GetFStopAttr().Get(), 2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(),", "self.assertTrue(Gf.IsClose(a, b, 1e-5)) basisCurvesPrim = UsdGeom.BasisCurves.Define(s, \"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent", "# Try authoring and reverting... 
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(),", "software # distributed under the Apache License with the above", "zip(expectedExtent, list(actualExtent)): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Mesh Test for i", "= UsdGeom.NurbsCurves.Define(stage, \"/NurbsCurves\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # NurbsCurves", "self.assertTrue(radius.HasMetadata('variability')) self.assertTrue(radius.IsDefined()) self.assertTrue(not radius.IsCustom()) self.assertEqual(radius.GetTypeName(), 'double') allMetadata = radius.GetAllMetadata() self.assertEqual(allMetadata['typeName'],", "\"/BasisCurves\") basisCurvesPrim.CreatePointsAttr(pointsData) basisCurvesPrim.CreateWidthsAttr(widths) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( basisCurvesPrim, Usd.TimeCode.Default()) for a,", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) def test_Bug116593(self): from pxr import Gf", "35.0, 1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10),", "Curve with 1 width [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], # Test", "Tf.Type.FindByName(\"UsdGeomImageable\") geomModelAPI = Tf.Type.FindByName(\"UsdGeomModelAPI\") self.assertTrue(Usd.SchemaRegistry.IsTyped(xform)) self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from", "Define() always authors typeName. 
mesh = UsdGeom.Mesh.Define(s, '/parent/subscope') self.assertTrue(mesh) self.assertTrue(not", "However, it's not # yet authored at the current edit", "Our plain prim should return false # for all of", "Many Points/Widths ] pointBasedSolutions = [ [(1, 1, 0), (1,", "# Simple Width Test [(-1, -1, -1), (1, 1, 1)],", "not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder))", "for i in range(numDataSets): pointsData = allPoints[i] widthsData = allWidths[i]", "Stage stage = Usd.Stage.CreateInMemory() # Xformable Tests identity = Gf.Matrix4d(1)", "= spherePrim.GetAttribute('visibility') self.assertTrue(visibility.IsDefined()) self.assertTrue('allowedTokens' in visibility.GetAllMetadata()) # Assert that attribute", "self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage = Usd.Stage.CreateInMemory() # Points Tests", "prim schema type in this module, validate that: # 1.", "For Point-Based [(-1, -1, -1), (1, 1, 1)], [(-1.5, -1,", "is not a Cylinder self.assertTrue(schema.GetBasisAttr()) # Camera Tests schema =", "-2), (3, 3, 3)], # Multiple Width Test # Erroneous", "# Cube is not a Cylinder self.assertTrue(schema.GetSizeAttr()) # Cylinder Tests", "Trademarks. is deleted and replaced with: # # 6. Trademarks.", "self.assertTrue(parent) # Make a subscope. scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope)", "applicable law or agreed to in writing, software # distributed", "Tests identity = Gf.Matrix4d(1) origin = Gf.Vec3f(0, 0, 0) xform", "\"/overMesh\") self.assertEqual(overMesh.GetDoubleSidedAttr().Get(), True) # Check querying of fallback values. 
sphere", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # BasisCurves is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # BasisCurves is", "self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is", "comply with Section 4(c) of # the License and to", "Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim = schema.GetPrim()", "not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) # Scope is not a Xformable", "expectedExtent is not None: for a, b in zip(expectedExtent, list(actualExtent)):", "Camera is Xformable self.assertEqual(camera.GetProjectionAttr().Get(), 'perspective') camera.GetProjectionAttr().Set('orthographic') self.assertEqual(camera.GetProjectionAttr().Get(), 'orthographic') self.assertTrue(Gf.IsClose(camera.GetHorizontalApertureAttr().Get(), 0.825", "# Complex Test, Many Points/Widths ] pointsSolutions = [ [(1,", "replaced with: # # 6. Trademarks. This License does not", "UsdGeom.Tokens.rightHanded) self.assertEqual(ori.Get(11), UsdGeom.Tokens.rightHanded) # # Attribute name sanity check. We", "= UsdGeom.BasisCurves.Define(stage, \"/Curves\") self.assertEqual(curves.GetNormalsInterpolation(), UsdGeom.Tokens.vertex) self.assertEqual(curves.GetWidthsInterpolation(), UsdGeom.Tokens.vertex) # Before we", "Test # Complex Test, Many Points/Widths [(3, -1, 5), (-1.5,", "and sanity check it still evals the same mesh2 =", "0)], # Simple Width Test [(-1, -1, -1), (1, 1,", "Camera is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Camera is a", "None) self.assertFalse(xformOpOrder.HasFallbackValue()) # Try authoring and reverting... 
xformOpOrderAttr = xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder)", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "= Gf.Range3f(Gf.Vec3f(*map(float, extremeExtentArr[0])), Gf.Vec3f(*map(float, extremeExtentArr[1]))) self.assertTrue(extremeExtentRange.IsEmpty()) # PointBased Test numDataSets", "# # You may obtain a copy of the Apache", "n[0].upper() + n[1:] self.assertTrue((\"Get\" + name + \"Attr\") in dir(mesh),", "-1, -1), (1, 1, 1)], [(-1.5, -1, -4), (3, 3,", "Verify that we get exceptions but don't crash when applying", "None) def test_Camera(self): from pxr import Gf stage = Usd.Stage.CreateInMemory()", "0), (1, 1, 1), (2, 2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList)", "prim = s.DefinePrim('/prim') types = [Tf.Type.FindByName('UsdGeomMotionAPI'), Tf.Type.FindByName('UsdGeomModelAPI')] # Check that", "big numbers, and negative numbers ids = [8589934592, 1099511627776, 0,", "mesh, since Define() always authors typeName. mesh = UsdGeom.Mesh.Define(s, '/parent/subscope')", "in range(numDataSets): pointsData = allPoints[i] expectedExtent = pointBasedSolutions[i] actualExtent =", "Tf.Type.FindByName('UsdGeomModelAPI')] # Check that no APIs have yet been applied", "# Zero-Volume Extent Test [2], # Simple Width Test [2,", "'/parent/subscope') self.assertTrue(mesh) self.assertTrue(not scope) # Make a mesh at a", "] pointBasedSolutions = [ [(1, 1, 0), (1, 1, 0)],", "or product names of the Licensor # and its affiliates,", "don't crash when applying to the # null prim. with", "Complex Test, Many Points/Widths ] # Perform the correctness tests", "test_Camera(self): from pxr import Gf stage = Usd.Stage.CreateInMemory() camera =", "authored at the current edit target. self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) #", "modification to it: # Section 6. Trademarks. is deleted and", "extent for PointBased prims. 
s = Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s,", "Check that all our applied schemas show up for t", "Test, Many Points/Widths ] pointsSolutions = [ [(1, 1, 0),", "widths = curvesWidths[i] expectedExtent = curvesSolutions[i] actualExtent = UsdGeom.Curves.ComputeExtent(pointsData, widths)", "# Author a custom property spec. layer = s.GetRootLayer() sphereSpec", "self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue()) self.assertNotEqual(xformOpOrderAttr.Get(), None) self.assertTrue(xformOpOrderAttr.Clear())", "service marks, or product names of the Licensor # and", "Ok For Point-Based [(-1, -1, -1), (1, 1, 1)], [(-1.5,", "self.assertTrue(UsdGeom.ModelAPI(prim).SetExtentsHint(vec)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[0], Gf.Vec3f(1,2,2)) self.assertEqual(UsdGeom.ModelAPI(prim).GetExtentsHint()[1], Gf.Vec3f(12,3,3)) # set with Gf vecs", "does not have its own compute extent function, so #", "Tests schema = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh))", "self.assertTrue(Usd.SchemaRegistry.IsTyped(imageable)) self.assertFalse(Usd.SchemaRegistry.IsTyped(geomModelAPI)) def test_Concrete(self): from pxr import Tf xform =", "self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring demotes to dense for", "for n in mesh.GetSchemaAttributeNames(): # apiName overrides if n ==", "Usd.Stage.Open(l.identifier) p = stage.DefinePrim(\"/Mesh\", \"Mesh\") self.assertTrue(p) mesh = UsdGeom.Mesh(p) self.assertTrue(mesh)", "self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) # NurbsPatch", "# Check querying of fallback values. 
sphere = UsdGeom.Sphere.Define(stage, \"/Sphere\")", "1)], # Multiple Width Test # Erroneous Widths/Points Test ->", "applied for t in types: self.assertFalse(prim.HasAPI(t)) # Apply our schemas", "Stage l = Sdf.Layer.CreateAnonymous() stage = Usd.Stage.Open(l.identifier) # For every", "= xform.GetPrim().GetAttribute(UsdGeom.Tokens.xformOpOrder) self.assertTrue(xformOpOrderAttr) self.assertEqual(xformOpOrderAttr.Get(), None) opOrderVal = [\"xformOp:transform\"] self.assertTrue(xformOpOrderAttr.Set(opOrderVal)) self.assertTrue(xformOpOrderAttr.HasAuthoredValue())", "# Test Curve with 2 widths (MAX) [(0,0,0), (3,1,1)], #", "and limitations under the Apache License. # pylint: disable=map-builtin-not-iterating import", "module, validate that: # 1. We can define a prim", "s.DefinePrim('/regular') types = [Tf.Type.FindByName('UsdGeomSphere'), Tf.Type.FindByName('UsdGeomGprim'), Tf.Type.FindByName('UsdGeomBoundable'), Tf.Type.FindByName('UsdGeomXformable'), Tf.Type.FindByName('UsdGeomImageable'), Tf.Type.FindByName('UsdTyped')] #", "not a Cylinder self.assertTrue(schema.GetRadiusAttr()) # Xform Tests schema = UsdGeom.Xform.Define(stage,", "a custom property spec. 
layer = s.GetRootLayer() sphereSpec = layer.GetPrimAtPath('/sphere')", "def test_Bug116593(self): from pxr import Gf s = Usd.Stage.CreateInMemory() prim", "\"/Capsule\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Capsule is not", "Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\") self.assertTrue(camera.GetPrim().IsA(UsdGeom.Xformable)) # Camera is Xformable", "Test -> Returns None None, [(-2.5, -1.5, -4.5), (3.5, 4,", "test_Typed(self): from pxr import Tf xform = Tf.Type.FindByName(\"UsdGeomXform\") imageable =", "= schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable))", "1e-5)) self.assertEqual(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(1, 1000000)) camera.GetClippingRangeAttr().Set(Gf.Vec2f(5, 10)) self.assertTrue(Gf.IsClose(camera.GetClippingRangeAttr().Get(), Gf.Vec2f(5, 10), 1e-5))", "= Sdf.AttributeSpec( sphereSpec, 'radius', Sdf.ValueTypeNames.Double, variability=Sdf.VariabilityUniform, declaresCustom=True) self.assertTrue(radiusSpec.custom) self.assertEqual(radiusSpec.variability, Sdf.VariabilityUniform)", "is a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Cone is not a Cylinder", "License and to reproduce the content of the NOTICE file.", "= xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue()) # xformOpOrder has no fallback value self.assertEqual(xformOpOrder.Get(),", "pxr import Gf stage = Usd.Stage.CreateInMemory() camera = UsdGeom.Camera.Define(stage, \"/Camera\")", "# Test Curve with 1 width [.5, .1], # Test", "# # Attribute name sanity check. We expect the names", "= UsdGeom.Xform.Define(stage, \"/Xform\") # direct subclass xformOpOrder = xform.GetXformOpOrderAttr() self.assertFalse(xformOpOrder.HasAuthoredValue())", "path, should work. 
mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self):", "Section 4(c) of # the License and to reproduce the", "Our sphere prim should return true on IsA queries for", "some simple test cases allPoints = [ [(1, 1, 0)],", "Mesh is a XFormable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Mesh is not a", "mesh = UsdGeom.Mesh.Define(s, '/parent/mesh') self.assertTrue(mesh) def test_BasicMetadataCases(self): s = Usd.Stage.CreateInMemory()", "a registered attribute of a well known schema. However, it's", "# Copyright 2017 Pixar # # Licensed under the Apache", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Scope is not a Mesh self.assertFalse(prim.IsA(UsdGeom.Xformable)) #", "Usd.Stage.CreateInMemory() parent = s.OverridePrim('/parent') self.assertTrue(parent) # Make a subscope. scope", "= Usd.Stage.CreateInMemory() # Points Tests schema = UsdGeom.Points.Define(stage, \"/Points\") self.assertTrue(schema)", "None None, [(-2.5, -1.5, -4.5), (3.5, 4, 5.5)] # Complex", "pointBasedSolutions[i] actualExtent = UsdGeom.PointBased.ComputeExtent(pointsData) for a, b in zip(expectedExtent, actualExtent):", "# Mesh Tests schema = UsdGeom.Mesh.Define(stage, \"/Mesh\") self.assertTrue(schema) prim =", "expectations # 3. At least one of its builtin properties", "reproduce the content of the NOTICE file. # # You", "or implied. 
See the Apache License for the specific #", "= s.DefinePrim('/hello') self.assertEqual([], root.GetAppliedSchemas()) # Check duplicates UsdGeom.MotionAPI.Apply(root) self.assertEqual(['MotionAPI'], root.GetAppliedSchemas())", "# We need to map the contents of extremeExtentArr to", "# make it a defined mesh, and sanity check it", "(1, 1, 0)], # Zero-Volume Extent Test [(0, 0, 0),", "to test a change for Bug111239, but now tests that", "Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # NurbsCurves is not a Cylinder self.assertTrue(schema.GetKnotsAttr()) #", "schema = UsdGeom.Sphere.Define(stage, \"/Sphere\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "import Sdf, Usd, UsdGeom, Vt, Gf, Tf class TestUsdGeomSchemata(unittest.TestCase): def", "Width Test [2, 4], # Multiple Width Test [2, 4,", "UsdGeom.Boundable API s = Usd.Stage.CreateInMemory() nurbsCurvesPrim = UsdGeom.NurbsCurves.Define(s, \"/NurbsCurves\") nurbsCurvesPrim.CreatePointsAttr(pointsData)", "does not grant permission to use the trade # names,", "2.8, 1e-5)) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 0.0) camera.GetFocusDistanceAttr().Set(10.0) self.assertEqual(camera.GetFocusDistanceAttr().Get(), 10.0) def test_Points(self): stage", "schema = UsdGeom.Cone.Define(stage, \"/Cone\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "(1, 1, 1), (2, 2, 2)] pointsAsVec3fArr = Vt.Vec3fArray(pointsAsList) comp", "UsdGeom.Curves.ComputeExtent(pointsData, widths) for a, b in zip(expectedExtent, actualExtent): self.assertTrue(Gf.IsClose(a, b,", "[2], # Simple Width Test [2, 4], # Multiple Width", "contents of extremeExtentArr to floats from # num.float32s due to", "+ str(dir(mesh)))) def test_IsA(self): # Author Scene and Compose Stage", "in sphere.GetAttributes()]) self.assertFalse(UsdGeom.Sphere(usdGeomSphere)) self.assertFalse('radius' in [a.GetName() for a in usdGeomSphere.GetAttributes()])", 
"self.assertTrue(len(mesh.GetSchemaAttributeNames()) > 0) self.assertNotEqual(mesh.GetSchemaAttributeNames(True), mesh.GetSchemaAttributeNames(False)) for n in mesh.GetSchemaAttributeNames(): #", "Width Test [2, 4, 5], # Erroneous Widths/Points Test [1,", "= UsdGeom.PointBased.ComputeExtent(emptyPoints) # We need to map the contents of", "test_HasAPI(self): from pxr import Usd, Tf s = Usd.Stage.CreateInMemory() prim", "should fall back to the extent for PointBased prims. s", "self.assertTrue(ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) mesh.GetOrientationAttr().Set(UsdGeom.Tokens.rightHanded, 10) # \"leftHanded\" should have been authored at", "range(numDataSets): pointsData = allPoints[i] widthsData = allWidths[i] expectedExtent = pointsSolutions[i]", "self.assertTrue(prim.HasAPI(t)) # Check that we get an exception for unknown", "\"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( pointsPrim, Usd.TimeCode.Default()) if actualExtent", "schema = UsdGeom.Cube.Define(stage, \"/Cube\") self.assertTrue(schema) prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) #", "self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Points is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) # Points", "(1,1,1), (2,1,1), (3,0,0)], # Test Curve with 2 widths [(0,0,0),", "self.assertEqual(allMetadata['typeName'], 'double') self.assertEqual(allMetadata['variability'], Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # Author a custom", "Test UsdGeomCurves curvesPoints = [ [(0,0,0), (1,1,1), (2,1,1), (3,0,0)], #", "use the trade # names, trademarks, service marks, or product", "for PointBased prims. s = Usd.Stage.CreateInMemory() meshPrim = UsdGeom.Mesh.Define(s, \"/Mesh\")", "a subscope. scope = UsdGeom.Scope.Define(s, '/parent/subscope') self.assertTrue(scope) # Assert that", "at the current edit target. 
self.assertTrue(ori.IsDefined()) self.assertTrue(not ori.IsAuthoredAt(ori.GetStage().GetEditTarget())) # Author", "meshPrim.CreatePointsAttr(pointsData) actualExtent = UsdGeom.Boundable.ComputeExtentFromPlugins( meshPrim, Usd.TimeCode.Default()) for a, b in", "Sdf.VariabilityVarying) self.assertEqual(allMetadata['custom'], False) # List fields on 'visibility' attribute --", "prim = schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Cone is not a Mesh", "# it should fall back to the extent for PointBased", "Perform the correctness tests for PointBased and Points # Test", "UsdGeom.Tokens.leftHanded) # The value \"rightHanded\" was set at t=10, so", "name sanity check. We expect the names returned by the", "a Xformable self.assertFalse(prim.IsA(UsdGeom.Cylinder)) # Camera is not a Cylinder self.assertTrue(schema.GetFocalLengthAttr())", "= Usd.Stage.CreateInMemory() pointsPrim = UsdGeom.Points.Define(s, \"/Points\") pointsPrim.CreatePointsAttr(pointsData) pointsPrim.CreateWidthsAttr(widthsData) actualExtent =", "self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.Unknown) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomXform')) with self.assertRaises(Tf.ErrorException): prim.HasAPI(Tf.Type.FindByName('UsdGeomImageable')) with self.assertRaises(Tf.ErrorException):", "schema.GetPrim() self.assertFalse(prim.IsA(UsdGeom.Mesh)) # Xform is not a Mesh self.assertTrue(prim.IsA(UsdGeom.Xformable)) #", "True) self.assertFalse(mesh.GetDoubleSidedAttr().HasAuthoredValue()) mesh.CreateDoubleSidedAttr(False, False) self.assertTrue(mesh.GetDoubleSidedAttr().HasAuthoredValue()) # Number 2: Sparse authoring", "# Cylinder Tests schema = UsdGeom.Cylinder.Define(stage, \"/Cylinder\") self.assertTrue(schema) prim =", "actualExtent): self.assertTrue(Gf.IsClose(a, b, 1e-5)) # Points Test for i in" ]
[ "worst every player can play every (game duration + stagger", "1) match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :]", "= tournament_start_time + timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p')", "* round_num))).strftime('%I:%M%p') for def_time in default_spread] time_df.loc[round_key, :] = match_times", "the latest. \"\"\" for round_num in range(time_df.shape[0]): round_key = 'Round", "(def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for def_time in", "game_stagger) for game_num in range(time_df.shape[1])] match_times = [ (def_time +", "pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger == 0: for round_num in range(time_df.shape[0]):", "every player can play every (game duration + stagger time)", "round_num))).strftime('%I:%M%p') for def_time in default_spread] time_df.loc[round_key, :] = match_times return", "columns=matchup_df.columns) if game_stagger == 0: for round_num in range(time_df.shape[0]): round_key", "default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num in", "+ timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return time_df", "# This is b/c your opponent begins play one stagger", "pandas as pd from datetime import timedelta def generate_times(matchup_df: pd.DataFrame,", "for round_num in range(time_df.shape[0]): round_key = 'Round ' + str(round_num", "str(round_num + 1) default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger)", "round_num in range(time_df.shape[0]): round_key = 'Round ' + str(round_num +", "play every (game duration + stagger time) # This is", "[ (def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for def_time", "= [tournament_start_time + timedelta(minutes=game_num * 
game_stagger) for game_num in range(time_df.shape[1])]", "[tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num in range(time_df.shape[1])] match_times", "the algorithm, at worst every player can play every (game", "time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger == 0: for round_num", "'Round ' + str(round_num + 1) match_time = tournament_start_time +", "= [ (def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for", "This is b/c your opponent begins play one stagger count", "time_df else: \"\"\" # Given the algorithm, at worst every", "from datetime import timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger):", "timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return time_df else:", "duration + stagger time) # This is b/c your opponent", "+ str(round_num + 1) default_spread = [tournament_start_time + timedelta(minutes=game_num *", "\"\"\" for round_num in range(time_df.shape[0]): round_key = 'Round ' +", "else: \"\"\" # Given the algorithm, at worst every player", "at the latest. \"\"\" for round_num in range(time_df.shape[0]): round_key =", "' + str(round_num + 1) default_spread = [tournament_start_time + timedelta(minutes=game_num", "\"\"\" # Given the algorithm, at worst every player can", "latest. 
\"\"\" for round_num in range(time_df.shape[0]): round_key = 'Round '", "tournament_start_time, game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger ==", "= match_time.strftime('%I:%M%p') return time_df else: \"\"\" # Given the algorithm,", "+ stagger time) # This is b/c your opponent begins", "range(time_df.shape[0]): round_key = 'Round ' + str(round_num + 1) match_time", "range(time_df.shape[0]): round_key = 'Round ' + str(round_num + 1) default_spread", "+ timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for def_time in default_spread]", "you at the latest. \"\"\" for round_num in range(time_df.shape[0]): round_key", "time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return time_df else: \"\"\" # Given", "for def_time in default_spread] time_df.loc[round_key, :] = match_times return time_df", "str(round_num + 1) match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num))", "# Given the algorithm, at worst every player can play", "game_stagger) * round_num))).strftime('%I:%M%p') for def_time in default_spread] time_df.loc[round_key, :] =", "pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger", "return time_df else: \"\"\" # Given the algorithm, at worst", "stagger time) # This is b/c your opponent begins play", "as pd from datetime import timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time,", "timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index,", "for game_num in range(time_df.shape[1])] match_times = [ (def_time + timedelta(minutes=((game_duration", "can play every (game duration + stagger time) # This", "(game duration + stagger time) # This is b/c your", "== 0: for round_num in 
range(time_df.shape[0]): round_key = 'Round '", "+ timedelta(minutes=game_num * game_stagger) for game_num in range(time_df.shape[1])] match_times =", "match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :] =", "b/c your opponent begins play one stagger count after you", "if game_stagger == 0: for round_num in range(time_df.shape[0]): round_key =", "datetime import timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df", "at worst every player can play every (game duration +", "count after you at the latest. \"\"\" for round_num in", "game_stagger == 0: for round_num in range(time_df.shape[0]): round_key = 'Round", "every (game duration + stagger time) # This is b/c", "+ 1) match_time = tournament_start_time + timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key,", "+ game_stagger) * round_num))).strftime('%I:%M%p') for def_time in default_spread] time_df.loc[round_key, :]", "game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger == 0:", "round_key = 'Round ' + str(round_num + 1) default_spread =", "1) default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for game_num", "timedelta(minutes=game_num * game_stagger) for game_num in range(time_df.shape[1])] match_times = [", "pd from datetime import timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration,", "game_num in range(time_df.shape[1])] match_times = [ (def_time + timedelta(minutes=((game_duration +", "import timedelta def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df =", "round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return time_df else: \"\"\" #", "round_key = 'Round ' + str(round_num + 1) match_time =", "stagger count after you at the latest. 
\"\"\" for round_num", "0: for round_num in range(time_df.shape[0]): round_key = 'Round ' +", "= 'Round ' + str(round_num + 1) default_spread = [tournament_start_time", "+ 1) default_spread = [tournament_start_time + timedelta(minutes=game_num * game_stagger) for", "import pandas as pd from datetime import timedelta def generate_times(matchup_df:", "player can play every (game duration + stagger time) #", "is b/c your opponent begins play one stagger count after", "* round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return time_df else: \"\"\"", "range(time_df.shape[1])] match_times = [ (def_time + timedelta(minutes=((game_duration + game_stagger) *", "'Round ' + str(round_num + 1) default_spread = [tournament_start_time +", "def generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns)", "begins play one stagger count after you at the latest.", "tournament_start_time + timedelta(minutes=(game_duration * round_num)) time_df.loc[round_key, :] = match_time.strftime('%I:%M%p') return", "one stagger count after you at the latest. \"\"\" for", "+ str(round_num + 1) match_time = tournament_start_time + timedelta(minutes=(game_duration *", "match_time.strftime('%I:%M%p') return time_df else: \"\"\" # Given the algorithm, at", "match_times = [ (def_time + timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p')", ":] = match_time.strftime('%I:%M%p') return time_df else: \"\"\" # Given the", "game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger == 0: for", "Given the algorithm, at worst every player can play every", "your opponent begins play one stagger count after you at", "' + str(round_num + 1) match_time = tournament_start_time + timedelta(minutes=(game_duration", "play one stagger count after you at the latest. 
\"\"\"", "in range(time_df.shape[0]): round_key = 'Round ' + str(round_num + 1)", "opponent begins play one stagger count after you at the", "after you at the latest. \"\"\" for round_num in range(time_df.shape[0]):", "timedelta(minutes=((game_duration + game_stagger) * round_num))).strftime('%I:%M%p') for def_time in default_spread] time_df.loc[round_key,", "algorithm, at worst every player can play every (game duration", "in range(time_df.shape[1])] match_times = [ (def_time + timedelta(minutes=((game_duration + game_stagger)", "= pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if game_stagger == 0: for round_num in", "time) # This is b/c your opponent begins play one", "= 'Round ' + str(round_num + 1) match_time = tournament_start_time", "generate_times(matchup_df: pd.DataFrame, tournament_start_time, game_duration, game_stagger): time_df = pd.DataFrame(index=matchup_df.index, columns=matchup_df.columns) if", "* game_stagger) for game_num in range(time_df.shape[1])] match_times = [ (def_time" ]
[ "copies. if not thisitem[\"duplified\"]: return True found_something = True #", "in console.database.rooms.all(): if itemid in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is", "LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A", "type checks and casts. itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0,", "is holding the item. for targetuser in console.database.users.all(): if targetuser[\"name\"]", "itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in your inventory.\".format(NAME,", "OR THE USE OR OTHER DEALINGS # IN THE SOFTWARE.", "OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH", "in case the item is duplified and we can't return", "TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR", "without restriction, including without limitation the # rights to use,", "return True found_something = True # Check if someone else", "and associated documentation files (the \"Software\"), to # deal in", "Ex. `locate item 4`\"\"\" def COMMAND(console, args): # Perform initial", "permission notice shall be included in # all copies or", "is holding it. You can only locate an item that", "the Software without restriction, including without limitation the # rights", "for other copies. if not thisitem[\"duplified\"]: return True found_something =", "itemid is None: return False # Check if the item", "exists but has no location. Use `requisition` to fix this.\".format(NAME))", "targetuser in console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]: continue if itemid", "holding=False) if not thisitem: return False # Keep track of", "True # Couldn't find the item. if not found_something: console.log.error(\"Item", "portions of the Software. # # THE SOFTWARE IS PROVIDED", "CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR", "itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in the inventory", "the item. 
if not found_something: console.log.error(\"Item exists but has no", "# # The above copyright notice and this permission notice", "found_something = True # Check if the item is in", "an item that you own. Wizards can locate any item.", "# Check if the item exists. thisitem = COMMON.check_item(NAME, console,", "the item. for targetuser in console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]:", "the inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If the", "and to permit persons to whom the Software is #", "other copies. if not thisitem[\"duplified\"]: return True found_something = True", "no location. Use `requisition` to fix this.\".format(NAME)) return False #", "copies of the Software, and to permit persons to whom", "hereby granted, free of charge, to any person obtaining a", "OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE", "# all copies or substantial portions of the Software. #", "OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.", "checks. if not COMMON.check(NAME, console, args, argc=1): return False #", "thisitem[\"id\"])) # If the item is duplified we need to", "HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER #", "args, checks=[[0, int]], retargs=0) if itemid is None: return False", "you own. Wizards can locate any item. Ex. `locate item", "included in # all copies or substantial portions of the", "Wizards can locate any item. Ex. `locate item 4`\"\"\" def", "console.msg(\"{0}: {1} ({2}) is in the inventory of: {3}.\".format(NAME, thisitem[\"name\"],", "is in a room. for targetroom in console.database.rooms.all(): if itemid", "item is duplified and we can't return right away. found_something", "distribute, sublicense, and/or # sell copies of the Software, and", "AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM,", "FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR", "someone else is holding the item. 
for targetuser in console.database.users.all():", "found_something = False # Check if we are holding the", "console.database.rooms.all(): if itemid in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is in", "can locate any item. Ex. `locate item 4`\"\"\" def COMMAND(console,", "notice and this permission notice shall be included in #", "Perform initial checks. if not COMMON.check(NAME, console, args, argc=1): return", "the Software. # # THE SOFTWARE IS PROVIDED \"AS IS\",", "CATEGORIES = [\"items\"] ALIASES = [\"find item\"] USAGE = \"locate", "in room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If", "# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR", "== console.user[\"name\"]: continue if itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2})", "TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN", "who is holding it. You can only locate an item", "all copies or substantial portions of the Software. # #", "return True found_something = True # Check if the item", "if not thisitem[\"duplified\"]: return True found_something = True # Check", "targetroom[\"name\"], targetroom[\"id\"])) # If the item is duplified we need", "# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF", "AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR", "room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If the", "= True # Check if the item is in a", "# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF", "sell copies of the Software, and to permit persons to", "\"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR #", "item. Ex. `locate item 4`\"\"\" def COMMAND(console, args): # Perform", "without limitation the # rights to use, copy, modify, merge,", "# Perform argument type checks and casts. 
itemid = COMMON.check_argtypes(NAME,", "for targetuser in console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]: continue if", "if we are holding the item. if itemid in console.user[\"inventory\"]:", "item. if itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in", "item. if not found_something: console.log.error(\"Item exists but has no location:", "# of this software and associated documentation files (the \"Software\"),", "2018-2020 # # <NAME> # ####################### # ********** # Permission", "furnished to do so, subject to the following conditions: #", "to do so, subject to the following conditions: # #", "# The above copyright notice and this permission notice shall", "SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR", "the # rights to use, copy, modify, merge, publish, distribute,", "a copy # of this software and associated documentation files", "for targetroom in console.database.rooms.all(): if itemid in targetroom[\"items\"]: console.msg(\"{0}: {1}", "# deal in the Software without restriction, including without limitation", "return False # Check if the item exists. thisitem =", "notice shall be included in # all copies or substantial", "and casts. itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0)", "# ********** # Permission is hereby granted, free of charge,", "False # Keep track of whether we found anything in", "found_something = True # Couldn't find the item. if not", "Check if someone else is holding the item. for targetuser", "# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO", "and this permission notice shall be included in # all", "IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS", "checks=[[0, int]], retargs=0) if itemid is None: return False #", "if not COMMON.check(NAME, console, args, argc=1): return False # Perform", "NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE", "WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS #", "we can't return right away. found_something = False # Check", "following conditions: # # The above copyright notice and this", "keep looking for other copies. if not thisitem[\"duplified\"]: return True", "conditions: # # The above copyright notice and this permission", "IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS", "# <NAME> # ####################### # ********** # Permission is hereby", "Item exists but has no location. Use `requisition` to fix", "False # Perform argument type checks and casts. itemid =", "anything in case the item is duplified and we can't", "4`\"\"\" def COMMAND(console, args): # Perform initial checks. if not", "FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN", "# Couldn't find the item. if not found_something: console.log.error(\"Item exists", "the item exists. thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False)", "if the item is in a room. for targetroom in", "but has no location. Use `requisition` to fix this.\".format(NAME)) return", "(the \"Software\"), to # deal in the Software without restriction,", "\"Software\"), to # deal in the Software without restriction, including", "need to keep looking for other copies. if not thisitem[\"duplified\"]:", "console.msg(\"{0}: {1} ({2}) is in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) #", "location: {item}\", item=itemid) console.msg(\"{0}: ERROR: Item exists but has no", "has no location. Use `requisition` to fix this.\".format(NAME)) return False", "return False # Perform argument type checks and casts. 
itemid", "if itemid is None: return False # Check if the", "DESCRIPTION = \"\"\"Find out what room the item <item_id> is", "and/or # sell copies of the Software, and to permit", "# ********** NAME = \"locate item\" CATEGORIES = [\"items\"] ALIASES", "be included in # all copies or substantial portions of", "# Copyright 2018-2020 # # <NAME> # ####################### # **********", "********** NAME = \"locate item\" CATEGORIES = [\"items\"] ALIASES =", "is hereby granted, free of charge, to any person obtaining", "location. Use `requisition` to fix this.\".format(NAME)) return False # Finished.", "# Keep track of whether we found anything in case", "if itemid in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is in room:", "but has no location: {item}\", item=itemid) console.msg(\"{0}: ERROR: Item exists", "locate an item that you own. Wizards can locate any", "{1} ({2}) is in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If", "= \"locate item <item_id>\" DESCRIPTION = \"\"\"Find out what room", "True found_something = True # Couldn't find the item. if", "person obtaining a copy # of this software and associated", "the item is in a room. for targetroom in console.database.rooms.all():", "not thisitem[\"duplified\"]: return True found_something = True # Couldn't find", "subject to the following conditions: # # The above copyright", "itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if itemid", "thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if not thisitem:", "console.msg(\"{0}: {1} ({2}) is in room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"],", "False # Check if we are holding the item. if", "sublicense, and/or # sell copies of the Software, and to", "in the Software without restriction, including without limitation the #", "to use, copy, modify, merge, publish, distribute, sublicense, and/or #", "THE SOFTWARE. 
# ********** NAME = \"locate item\" CATEGORIES =", "or who is holding it. You can only locate an", "is in room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) #", "item is duplified we need to keep looking for other", "or substantial portions of the Software. # # THE SOFTWARE", "thisitem[\"id\"], targetuser[\"name\"])) # If the item is duplified we need", "BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS", "FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL", "OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR", "False # Check if the item exists. thisitem = COMMON.check_item(NAME,", "CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS", "IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER", "# <NAME> # # locate_item.py # # Copyright 2018-2020 #", "CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION", "# Permission is hereby granted, free of charge, to any", "of charge, to any person obtaining a copy # of", "return True found_something = True # Couldn't find the item.", "INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, #", "of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If the item is", "# # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY", "associated documentation files (the \"Software\"), to # deal in the", "if not found_something: console.log.error(\"Item exists but has no location: {item}\",", "COMMON.check(NAME, console, args, argc=1): return False # Perform argument type", "<item_id>\" DESCRIPTION = \"\"\"Find out what room the item <item_id>", "NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT", "= [\"find item\"] USAGE = \"locate item <item_id>\" DESCRIPTION =", "is in the inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) #", "None: return False # Check if the item exists. 
thisitem", "NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR", "in the inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If", "Perform argument type checks and casts. itemid = COMMON.check_argtypes(NAME, console,", "retargs=0) if itemid is None: return False # Check if", "is in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If the item", "# If the item is duplified we need to keep", "{3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If the item", "LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER", "whether we found anything in case the item is duplified", "limitation the # rights to use, copy, modify, merge, publish,", "so, subject to the following conditions: # # The above", "({2}) is in the inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"]))", "# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE", "def COMMAND(console, args): # Perform initial checks. if not COMMON.check(NAME,", "deal in the Software without restriction, including without limitation the", "Check if the item is in a room. for targetroom", "DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF", "locate_item.py # # Copyright 2018-2020 # # <NAME> # #######################", "item. for targetuser in console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]: continue", "thisitem[\"duplified\"]: return True found_something = True # Check if someone", "the following conditions: # # The above copyright notice and", "is None: return False # Check if the item exists.", "a room. for targetroom in console.database.rooms.all(): if itemid in targetroom[\"items\"]:", "any item. Ex. `locate item 4`\"\"\" def COMMAND(console, args): #", "only locate an item that you own. Wizards can locate", "are holding the item. 
if itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1}", "= True # Check if someone else is holding the", "itemid in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is in room: {3}", "this software and associated documentation files (the \"Software\"), to #", "the Software, and to permit persons to whom the Software", "= \"\"\"Find out what room the item <item_id> is in,", "your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If the item is duplified", "args, argc=1): return False # Perform argument type checks and", "`requisition` to fix this.\".format(NAME)) return False # Finished. return True", "THE SOFTWARE OR THE USE OR OTHER DEALINGS # IN", "the item is duplified we need to keep looking for", "find the item. if not found_something: console.log.error(\"Item exists but has", "# rights to use, copy, modify, merge, publish, distribute, sublicense,", "case the item is duplified and we can't return right", "####################### # ********** # Permission is hereby granted, free of", "holding the item. if itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2})", "in # all copies or substantial portions of the Software.", "COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if not thisitem: return False", "Keep track of whether we found anything in case the", "FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT", "thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If the item is duplified we", "persons to whom the Software is # furnished to do", "IN THE SOFTWARE. 
# ********** NAME = \"locate item\" CATEGORIES", "in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in your inventory.\".format(NAME, thisitem[\"name\"],", "SOFTWARE OR THE USE OR OTHER DEALINGS # IN THE", "if targetuser[\"name\"] == console.user[\"name\"]: continue if itemid in targetuser[\"inventory\"]: console.msg(\"{0}:", "<reponame>seisatsu/DennisMUD-ESP32<filename>src/commands/locate_item.py ####################### # <NAME> # # locate_item.py # # Copyright", "in, or who is holding it. You can only locate", "inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If the item", "not found_something: console.log.error(\"Item exists but has no location: {item}\", item=itemid)", "********** # Permission is hereby granted, free of charge, to", "in a room. for targetroom in console.database.rooms.all(): if itemid in", "to keep looking for other copies. if not thisitem[\"duplified\"]: return", "MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN", "thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If the item is duplified we", "Software. # # THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT", "initial checks. if not COMMON.check(NAME, console, args, argc=1): return False", "to any person obtaining a copy # of this software", "targetuser[\"name\"] == console.user[\"name\"]: continue if itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1}", "item=itemid) console.msg(\"{0}: ERROR: Item exists but has no location. 
Use", "not COMMON.check(NAME, console, args, argc=1): return False # Perform argument", "console, itemid, owner=True, holding=False) if not thisitem: return False #", "thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If the item is duplified", "of the Software, and to permit persons to whom the", "console, args, checks=[[0, int]], retargs=0) if itemid is None: return", "ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN", "BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY,", "COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if itemid is None:", "room. for targetroom in console.database.rooms.all(): if itemid in targetroom[\"items\"]: console.msg(\"{0}:", "# locate_item.py # # Copyright 2018-2020 # # <NAME> #", "# ####################### # ********** # Permission is hereby granted, free", "IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING #", "Software is # furnished to do so, subject to the", "ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT", "PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR", "# Check if someone else is holding the item. for", "has no location: {item}\", item=itemid) console.msg(\"{0}: ERROR: Item exists but", "documentation files (the \"Software\"), to # deal in the Software", "whom the Software is # furnished to do so, subject", "is in, or who is holding it. You can only", "item exists. thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if", "int]], retargs=0) if itemid is None: return False # Check", "track of whether we found anything in case the item", "<NAME> # ####################### # ********** # Permission is hereby granted,", "THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY", "# # <NAME> # ####################### # ********** # Permission is", "substantial portions of the Software. 
# # THE SOFTWARE IS", "{item}\", item=itemid) console.msg(\"{0}: ERROR: Item exists but has no location.", "files (the \"Software\"), to # deal in the Software without", "item <item_id> is in, or who is holding it. You", "({2}) is in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If the", "do so, subject to the following conditions: # # The", "LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,", "WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING", "what room the item <item_id> is in, or who is", "COMMAND(console, args): # Perform initial checks. if not COMMON.check(NAME, console,", "if not thisitem: return False # Keep track of whether", "item\" CATEGORIES = [\"items\"] ALIASES = [\"find item\"] USAGE =", "item <item_id>\" DESCRIPTION = \"\"\"Find out what room the item", "# furnished to do so, subject to the following conditions:", "any person obtaining a copy # of this software and", "Use `requisition` to fix this.\".format(NAME)) return False # Finished. return", "SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND,", "looking for other copies. if not thisitem[\"duplified\"]: return True found_something", "checks and casts. itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]],", "shall be included in # all copies or substantial portions", "KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO", "it. You can only locate an item that you own.", "OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES", "thisitem[\"duplified\"]: return True found_something = True # Couldn't find the", "can't return right away. found_something = False # Check if", "copyright notice and this permission notice shall be included in", "SOFTWARE. 
# ********** NAME = \"locate item\" CATEGORIES = [\"items\"]", "{1} ({2}) is in the inventory of: {3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"],", "USAGE = \"locate item <item_id>\" DESCRIPTION = \"\"\"Find out what", "{1} ({2}) is in room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"],", "in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is in room: {3} ({4})\".format(NAME,", "ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED", "free of charge, to any person obtaining a copy #", "Check if the item exists. thisitem = COMMON.check_item(NAME, console, itemid,", "= [\"items\"] ALIASES = [\"find item\"] USAGE = \"locate item", "if itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in your", "argc=1): return False # Perform argument type checks and casts.", "True # Check if the item is in a room.", "({2}) is in room: {3} ({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"]))", "IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,", "# Check if the item is in a room. for", "found anything in case the item is duplified and we", "right away. found_something = False # Check if we are", "of the Software. # # THE SOFTWARE IS PROVIDED \"AS", "away. found_something = False # Check if we are holding", "in console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]: continue if itemid in", "console.msg(\"{0}: ERROR: Item exists but has no location. Use `requisition`", "<NAME> # # locate_item.py # # Copyright 2018-2020 # #", "publish, distribute, sublicense, and/or # sell copies of the Software,", "console.log.error(\"Item exists but has no location: {item}\", item=itemid) console.msg(\"{0}: ERROR:", "targetroom[\"items\"]: console.msg(\"{0}: {1} ({2}) is in room: {3} ({4})\".format(NAME, thisitem[\"name\"],", "ALIASES = [\"find item\"] USAGE = \"locate item <item_id>\" DESCRIPTION", "return right away. 
found_something = False # Check if we", "item 4`\"\"\" def COMMAND(console, args): # Perform initial checks. if", "found_something: console.log.error(\"Item exists but has no location: {item}\", item=itemid) console.msg(\"{0}:", "\"\"\"Find out what room the item <item_id> is in, or", "continue if itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in", "casts. itemid = COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if", "we need to keep looking for other copies. if not", "of this software and associated documentation files (the \"Software\"), to", "OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR", "OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE", "EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE", "console, args, argc=1): return False # Perform argument type checks", "item is in a room. for targetroom in console.database.rooms.all(): if", "= True # Couldn't find the item. if not found_something:", "in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If the item is", "# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM,", "PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS", "True found_something = True # Check if the item is", "out what room the item <item_id> is in, or who", "= False # Check if we are holding the item.", "####################### # <NAME> # # locate_item.py # # Copyright 2018-2020", "# # locate_item.py # # Copyright 2018-2020 # # <NAME>", "Copyright 2018-2020 # # <NAME> # ####################### # ********** #", "WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND", "thisitem: return False # Keep track of whether we found", "charge, to any person obtaining a copy # of this", "permit persons to whom the Software is # furnished to", "THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE", "= \"locate item\" CATEGORIES = [\"items\"] ALIASES = [\"find item\"]", "<item_id> is in, or who is holding it. 
You can", "the Software is # furnished to do so, subject to", "above copyright notice and this permission notice shall be included", "IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED,", "that you own. Wizards can locate any item. Ex. `locate", "A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE", "this permission notice shall be included in # all copies", "console.database.users.all(): if targetuser[\"name\"] == console.user[\"name\"]: continue if itemid in targetuser[\"inventory\"]:", "OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION", "PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE #", "if the item exists. thisitem = COMMON.check_item(NAME, console, itemid, owner=True,", "True # Check if someone else is holding the item.", "EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE", "copy, modify, merge, publish, distribute, sublicense, and/or # sell copies", "DEALINGS # IN THE SOFTWARE. # ********** NAME = \"locate", "we found anything in case the item is duplified and", "console.user[\"name\"]: continue if itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is", "copies or substantial portions of the Software. # # THE", "own. Wizards can locate any item. Ex. `locate item 4`\"\"\"", "If the item is duplified we need to keep looking", "`locate item 4`\"\"\" def COMMAND(console, args): # Perform initial checks.", "owner=True, holding=False) if not thisitem: return False # Keep track", "THE USE OR OTHER DEALINGS # IN THE SOFTWARE. #", "duplified and we can't return right away. found_something = False", "OR OTHER DEALINGS # IN THE SOFTWARE. # ********** NAME", "room the item <item_id> is in, or who is holding", "You can only locate an item that you own. Wizards", "ARISING # FROM, OUT OF OR IN CONNECTION WITH THE", "# sell copies of the Software, and to permit persons", "can only locate an item that you own. Wizards can", "we are holding the item. 
if itemid in console.user[\"inventory\"]: console.msg(\"{0}:", "= COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if not thisitem: return", "OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT", "targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in the inventory of: {3}.\".format(NAME,", "thisitem[\"duplified\"]: return True found_something = True # Check if the", "= COMMON.check_argtypes(NAME, console, args, checks=[[0, int]], retargs=0) if itemid is", "the item is duplified and we can't return right away.", "OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT,", "else is holding the item. for targetuser in console.database.users.all(): if", "the item <item_id> is in, or who is holding it.", "to the following conditions: # # The above copyright notice", "in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in the inventory of:", "argument type checks and casts. itemid = COMMON.check_argtypes(NAME, console, args,", "itemid, owner=True, holding=False) if not thisitem: return False # Keep", "and we can't return right away. found_something = False #", "holding it. You can only locate an item that you", "OTHER DEALINGS # IN THE SOFTWARE. # ********** NAME =", "targetroom in console.database.rooms.all(): if itemid in targetroom[\"items\"]: console.msg(\"{0}: {1} ({2})", "not thisitem: return False # Keep track of whether we", "# # Copyright 2018-2020 # # <NAME> # ####################### #", "[\"find item\"] USAGE = \"locate item <item_id>\" DESCRIPTION = \"\"\"Find", "ERROR: Item exists but has no location. Use `requisition` to", "exists. thisitem = COMMON.check_item(NAME, console, itemid, owner=True, holding=False) if not", "if not thisitem[\"duplified\"]: return True found_something = True # Couldn't", "USE OR OTHER DEALINGS # IN THE SOFTWARE. # **********", "console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in your inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"]))", "the item. 
if itemid in console.user[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is", "Software, and to permit persons to whom the Software is", "OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT", "rights to use, copy, modify, merge, publish, distribute, sublicense, and/or", "Couldn't find the item. if not found_something: console.log.error(\"Item exists but", "item that you own. Wizards can locate any item. Ex.", "Check if we are holding the item. if itemid in", "restriction, including without limitation the # rights to use, copy,", "# IN THE SOFTWARE. # ********** NAME = \"locate item\"", "locate any item. Ex. `locate item 4`\"\"\" def COMMAND(console, args):", "exists but has no location: {item}\", item=itemid) console.msg(\"{0}: ERROR: Item", "OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF", "return False # Keep track of whether we found anything", "if itemid in targetuser[\"inventory\"]: console.msg(\"{0}: {1} ({2}) is in the", "[\"items\"] ALIASES = [\"find item\"] USAGE = \"locate item <item_id>\"", "inventory.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"])) # If the item is duplified we", "Software without restriction, including without limitation the # rights to", "item\"] USAGE = \"locate item <item_id>\" DESCRIPTION = \"\"\"Find out", "args): # Perform initial checks. if not COMMON.check(NAME, console, args,", "True found_something = True # Check if someone else is", "software and associated documentation files (the \"Software\"), to # deal", "COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER", "\"locate item <item_id>\" DESCRIPTION = \"\"\"Find out what room the", "no location: {item}\", item=itemid) console.msg(\"{0}: ERROR: Item exists but has", "holding the item. for targetuser in console.database.users.all(): if targetuser[\"name\"] ==", "is duplified and we can't return right away. 
found_something =", "NAME = \"locate item\" CATEGORIES = [\"items\"] ALIASES = [\"find", "use, copy, modify, merge, publish, distribute, sublicense, and/or # sell", "if someone else is holding the item. for targetuser in", "# Check if we are holding the item. if itemid", "granted, free of charge, to any person obtaining a copy", "targetuser[\"name\"])) # If the item is duplified we need to", "modify, merge, publish, distribute, sublicense, and/or # sell copies of", "obtaining a copy # of this software and associated documentation", "({4})\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetroom[\"name\"], targetroom[\"id\"])) # If the item is", "is # furnished to do so, subject to the following", "of whether we found anything in case the item is", "including without limitation the # rights to use, copy, modify,", "to whom the Software is # furnished to do so,", "copy # of this software and associated documentation files (the", "merge, publish, distribute, sublicense, and/or # sell copies of the", "THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY", "Permission is hereby granted, free of charge, to any person", "# Perform initial checks. if not COMMON.check(NAME, console, args, argc=1):", "thisitem[\"name\"], thisitem[\"id\"])) # If the item is duplified we need", "The above copyright notice and this permission notice shall be", "to # deal in the Software without restriction, including without", "found_something = True # Check if someone else is holding", "{3}.\".format(NAME, thisitem[\"name\"], thisitem[\"id\"], targetuser[\"name\"])) # If the item is duplified", "not thisitem[\"duplified\"]: return True found_something = True # Check if", "duplified we need to keep looking for other copies. 
if", "is duplified we need to keep looking for other copies.", "\"locate item\" CATEGORIES = [\"items\"] ALIASES = [\"find item\"] USAGE", "WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT", "AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES", "targetroom[\"id\"])) # If the item is duplified we need to", "to permit persons to whom the Software is # furnished", "WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING" ]
[ "rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train,", "[] # calculate monthly estimations for 3 models for dependant_month", "= json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df =", "rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost',", "[] rfr_row = [] dtr_row = [] for month in", "json import pandas as pd import matplotlib.pyplot as plt from", "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb", "month_dependant_variables] data = zone_26_df xgb_results = [] rfr_results = []", "import pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model", "import DecisionTreeRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor", "zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for", "catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import", "against the 3 models for row_target_index in range(20): xgb_row =", "dtr_row = [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index])", "label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables,", "dtr_results = [] # calculate monthly estimations for 3 
models", "from xgboost import XGBRegressor from catboost import CatBoostRegressor from sklearn.model_selection", "import train_test_split from sklearn.metrics import mean_squared_error as MSE, r2_score import", "'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month)", "= [] dtr_results = [] # calculate monthly estimations for", "plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5)", "plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly", "row_target_index in range(20): xgb_row = [] rfr_row = [] dtr_row", "pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist']", "label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-',", "data = zone_26_df xgb_results = [] rfr_results = [] dtr_results", "dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]]", "xgb_results = [] rfr_results = [] dtr_results = [] #", "with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data = json.load(f) all_zones_df", "features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train, y_test =", "range(20): xgb_row = [] rfr_row = [] dtr_row = []", "xgboost import XGBRegressor from catboost import CatBoostRegressor from 
sklearn.model_selection import", "= [] rfr_results = [] dtr_results = [] # calculate", "import XGBRegressor from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split", "from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree", "xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42)", "pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in", "rfr_results = [] dtr_results = [] # calculate monthly estimations", "y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr =", "monthly estimations for 3 models for dependant_month in month_dependant_variables: features_df", "month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue',", "data = json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df", "matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.ensemble import", "CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as", "= data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1)", "models for row_target_index in range(20): xgb_row = [] rfr_row =", "X_test, y_train, 
y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb =", "in range(20): xgb_row = [] rfr_row = [] dtr_row =", "in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X", "xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train,", "MSE, r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f:", "= [] # calculate monthly estimations for 3 models for", "estimations for 3 models for dependant_month in month_dependant_variables: features_df =", "zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels", "= XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train)", "y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train,", "for row_target_index in range(20): xgb_row = [] rfr_row = []", "y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42)", "RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X))", "for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb',", "from catboost import 
CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics", "= pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x", "alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple',", "random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42)", "f: # data = json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df =", "XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X))", "'-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best')", "import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data", "= [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index],", "from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from catboost", "pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from", "open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data = json.load(f) all_zones_df =", "import CatBoostRegressor from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error", "in range(12): xgb_row.append(xgb_results[month][row_target_index]) 
rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5)", "dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of scsb against", "from sklearn.metrics import mean_squared_error as MSE, r2_score import math #", "axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X,", "plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5)", "rfr_row = [] dtr_row = [] for month in range(12):", "all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df", "'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train,", "label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name", "# compare the outputs of scsb against the 3 models", "X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train,", "json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\")", "scsb against the 3 models for row_target_index in range(20): xgb_row", "= train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train)", "rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the", "= pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = 
pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables =", "from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost", "r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: #", "rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) #", "mean_squared_error as MSE, r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r')", "'-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution')", "month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in month_dependant_variables]", "sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import", "xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr = RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr", "= RandomForestRegressor(random_state=42) rfr.fit(X_train, y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train)", "= [] dtr_row = [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index])", "range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row,", "compare the outputs of 
scsb against the 3 models for", "alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index]", "plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index] plt.title(name)", "x in month_dependant_variables] data = zone_26_df xgb_results = [] rfr_results", "= zone_26_df xgb_results = [] rfr_results = [] dtr_results =", "= pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels =", "= pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df =", "y = features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X, y,", "y_train) rfr_results.append(rfr.predict(X)) dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare", "import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor", "LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from", "import json import pandas as pd import matplotlib.pyplot as plt", "= ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in month_dependant_variables] data", "calculate monthly estimations for 3 models for dependant_month in month_dependant_variables:", "3 models for row_target_index in range(20): xgb_row = [] rfr_row", 
"plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5)", "'-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row,", "<filename>modelling/scsb/models/monthly-comparisons.py import json import pandas as pd import matplotlib.pyplot as", "color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree',", "= [] rfr_row = [] dtr_row = [] for month", "dtr = DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs", "dtr_results.append(dtr.predict(X)) # compare the outputs of scsb against the 3", "[] dtr_results = [] # calculate monthly estimations for 3", "= features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30,", "train_test_split(X, y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X))", "import LinearRegression from sklearn.ensemble import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor", "import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression from sklearn.ensemble", "XGBRegressor from catboost import CatBoostRegressor from sklearn.model_selection import train_test_split from", "pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\")", "as pd import matplotlib.pyplot as plt from sklearn.linear_model import LinearRegression", "= DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of", "as MSE, r2_score import math # with 
open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as", "= features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test, y_train, y_test", "models for dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation',", "in month_dependant_variables] data = zone_26_df xgb_results = [] rfr_results =", "[] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-',", "zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\") zone_26_df = pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables", "xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index]) dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-',", "for 3 models for dependant_month in month_dependant_variables: features_df = data[['median_elevation',", "color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name =", "alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month')", "plt from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor from", "'r') as f: # data = json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\")", "zone_26_df xgb_results = [] rfr_results = [] dtr_results = []", "outputs of scsb against the 3 models for row_target_index in", "sklearn.ensemble 
import RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import", "the outputs of scsb against the 3 models for row_target_index", "RandomForestRegressor from sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from", "from sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as MSE,", "'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y =", "data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month], axis=1) y", "features_df.get(dependant_month) X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.30, random_state=42)", "alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green',", "'-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest', color='green', alpha=0.5) plt.plot(dtr_row,", "import mean_squared_error as MSE, r2_score import math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json',", "the 3 models for row_target_index in range(20): xgb_row = []", "for x in month_dependant_variables] data = zone_26_df xgb_results = []", "y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of scsb against the", "as plt from sklearn.linear_model import LinearRegression from sklearn.ensemble import RandomForestRegressor", "3 models for dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage',", "month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X =", "train_test_split from sklearn.metrics import mean_squared_error as MSE, r2_score import math", 
"sklearn.metrics import mean_squared_error as MSE, r2_score import math # with", "DecisionTreeRegressor(random_state=42) dtr.fit(X_train, y_train) dtr_results.append(dtr.predict(X)) # compare the outputs of scsb", "dependant_month]] X = features_df.drop([dependant_month], axis=1) y = features_df.get(dependant_month) X_train, X_test,", "y, test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr", "xgb_row = [] rfr_row = [] dtr_row = [] for", "pd.read_csv(\"../data/scsb_zone_26.csv\") zone_27_df = pd.read_csv(\"../data/scsb_zone_27.csv\") month_dependant_variables = ['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3]", "dtr_row.append(dtr_results[month][row_target_index]) plt.plot(data[month_dependant_variables].iloc[row_target_index], '-', label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red',", "for dependant_month in month_dependant_variables: features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration',", "features_df = data[['median_elevation', 'glacial_coverage', 'annual_precipitation', 'potential_evapo_transpiration', dependant_month]] X = features_df.drop([dependant_month],", "color='green', alpha=0.5) plt.plot(dtr_row, '-', label='decisiontree', color='purple', alpha=0.5) plt.legend(loc='best') plt.xticks(month_dependant_variables, month_labels)", "[] dtr_row = [] for month in range(12): xgb_row.append(xgb_results[month][row_target_index]) rfr_row.append(rfr_results[month][row_target_index])", "month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index] plt.title(name) plt.savefig('../plots/{}.png'.format(name)) plt.show()", 
"['jan_dist','feb_dist','mar_dist','apr_dist','may_dist','jun_dist','jul_dist','aug_dist','sep_dist','oct_dist','nov_dist','dec_dist'] month_labels = [x[0:3] for x in month_dependant_variables] data =", "pandas as pd import matplotlib.pyplot as plt from sklearn.linear_model import", "color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-', label='randomforest',", "# with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data = json.load(f)", "plt.xticks(month_dependant_variables, month_labels) plt.xlabel('Month') plt.ylabel('Monthly Distribution') name = data['name'].iloc[row_target_index] plt.title(name) plt.savefig('../plots/{}.png'.format(name))", "sklearn.tree import DecisionTreeRegressor from xgboost import XGBRegressor from catboost import", "as f: # data = json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df", "math # with open('../../data/output/training_data/annual_mean_training_dataset_08-11-2020.json', 'r') as f: # data =", "# calculate monthly estimations for 3 models for dependant_month in", "[x[0:3] for x in month_dependant_variables] data = zone_26_df xgb_results =", "test_size=0.30, random_state=42) xgb = XGBRegressor(random_state=42) xgb.fit(X_train, y_train) xgb_results.append(xgb.predict(X)) rfr =", "of scsb against the 3 models for row_target_index in range(20):", "DecisionTreeRegressor from xgboost import XGBRegressor from catboost import CatBoostRegressor from", "label='scsb', color='blue', alpha=0.5) plt.plot(xgb_row, '-', label='xgboost', color='red', alpha=0.5) plt.plot(rfr_row, '-',", "sklearn.model_selection import train_test_split from sklearn.metrics import mean_squared_error as MSE, r2_score", "# data = json.load(f) all_zones_df = pd.read_csv(\"../data/scsb_all_zones.csv\") zone_25_df = pd.read_csv(\"../data/scsb_zone_25.csv\")", "= [x[0:3] for x in month_dependant_variables] data = zone_26_df 
xgb_results", "month_labels = [x[0:3] for x in month_dependant_variables] data = zone_26_df", "[] rfr_results = [] dtr_results = [] # calculate monthly" ]
[ "pandas as pd # Create temp directory to download input", "that AutoML can run trials without # MAGIC running out", "we disable SHAP by default.<br /> # MAGIC You can", "explain.<br /> # MAGIC For more thorough results, increase the", "f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- # MAGIC", "# Delete the temp data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5)", "# MAGIC ``` # MAGIC model_name = \"Example\" # MAGIC", "the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the parent", "all feature columns to be centered around zero with unit", "index=[\"validation\", \"test\"])) # COMMAND ---------- # Patch requisite packages to", "import FunctionTransformer for col in {'type', 'author'}: vectorizer = Pipeline(steps=[", "retrieve it later for inference. # MAGIC # MAGIC >", "# MAGIC Convert each low-cardinality categorical column into multiple binary", "the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") #", "nulls. 
# To enable SHAP to succeed, both the background", "ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND ---------- # MAGIC %md #", "enable SHAP to succeed, both the background data and examples", "tab of the AutoML Experiment page for details on why", "trials without # MAGIC running out of memory, we disable", "using data with nulls; if your dataset has any, both", "\"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ---------- # MAGIC %md # MAGIC", "this flag to True and re-run the notebook to see", "lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link=\"logit\") shap_values", "explain will be imputed using the mode (most frequent values).", "from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline", "# MAGIC #### Low-cardinality categoricals # MAGIC Convert each low-cardinality", "approval and governance workflows, and monitor ML deployments and their", "---------- import mlflow import sklearn from sklearn import set_config from", "# MAGIC ## Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html)", "tune the hyperparameters of the model) # MAGIC - Test", "``` # MAGIC model_name = \"Example\" # MAGIC # MAGIC", "the dataset used to report the true performance of the", "MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC", "MAGIC ``` # COMMAND ---------- # model_uri for the generated", "df_loaded.columns # COMMAND ---------- from sklearn.model_selection import train_test_split split_X =", "to train the model) # MAGIC - Validation (20% of", "# MAGIC For more information on how to read Shapley", "\"Example\" # MAGIC # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\"", "`model_uri` for the model already trained in this notebook can", "details on why these 
columns are dropped. # COMMAND ----------", "auto-generated notebook. To reproduce these results, attach this notebook to", "and model output. Features are ranked in descending order of", "'bs'}, 'title_without_stopwords': {0: 'aliens are coming to invade earth'}, 'text_without_stopwords':", "to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND", "MAGIC - To view the full list of tunable hyperparameters,", "logged metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"): v for k, v", "to see the SHAP plots. # MAGIC - To reduce", "Train (60% of the dataset used to train the model)", "on how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html).", "MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import", "XGBoost training # MAGIC This is an auto-generated notebook. To", "the MLflow experiment # MAGIC - To view the full", "values, as the imputed samples may not match the actual", "to Model Registry # MAGIC ``` # MAGIC model_name =", "to invade earth'}, 'text_without_stopwords': {0: 'aliens are coming to invade", "corresponds to one of the top word n-grams # MAGIC", "parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train,", "will be imputed using the mode (most frequent values). This", "using TF-IDF vectorization. The length of the output # MAGIC", "sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline for feature in", "train a model that can predict on a dataset that", "link isn't very useful.) # MAGIC - Clone this notebook", "the actual data distribution. 
# MAGIC # MAGIC For more", "of the dataset used to tune the hyperparameters of the", "'aliens are coming to invade earth'}, 'hasImage': {0: 1.0}} df", "f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri)", "prefix=\"val_\") # Log metrics for the test set xgbc_test_metrics =", "COMMAND ---------- # MAGIC %md # MAGIC ### Loading model", "the background data and # MAGIC examples to explain will", "XGBClassifier help(XGBClassifier) # COMMAND ---------- import mlflow import sklearn from", "MAGIC Convert each feature to a fixed-length vector using TF-IDF", "pipeline to transform the validation dataset. This is used for", "---------- # MAGIC %md # MAGIC ### Categorical columns #", "work together from experimentation to online testing and production, integrate", "your project folder by selecting **File > Clone** in the", "# COMMAND ---------- import mlflow import sklearn from sklearn import", "# COMMAND ---------- # MAGIC %md # MAGIC ## Inference", "y_val) model # COMMAND ---------- # Enable automatic logging of", "MAGIC SHAP is a game-theoretic approach to explain machine learning", "# Patch requisite packages to the model environment YAML for", "# COMMAND ---------- # MAGIC %md # MAGIC ## Feature", "model.predict(input_X) # MAGIC ``` # MAGIC # MAGIC ### Load", "together from experimentation to online testing and production, integrate with", "model without registering # MAGIC ``` # MAGIC model_uri =", "# MAGIC `[]` are dropped in the pipelines. See the", "Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share", "\"\"): v for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\",", "# Log metrics for the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model,", "is an auto-generated notebook. 
To reproduce these results, attach this", "= ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md # MAGIC", "# MAGIC ## Train - Validation - Test Split #", "\"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"] col_selector = ColumnSelector(supported_cols) #", "computational overhead of each trial, a single example is sampled", "---------- # MAGIC %md # MAGIC ## Preprocessors # COMMAND", "Convert each medium-cardinality categorical column into a numerical representation. #", "relationship between features and model output. Features are ranked in", "invade earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0:", "COMMAND ---------- # MAGIC %md # MAGIC ## Train classification", "sampled from the validation set to explain.<br /> # MAGIC", "# MAGIC where n is in the range [1, 2].", "MAGIC - Train (60% of the dataset used to train", "{0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0:", "the columns that are supported. This allows us to train", "# COMMAND ---------- # MAGIC %md # MAGIC ### Select", "size and rerun for more thorough results. example = X_val.sample(n=1).fillna(mode)", "each medium-cardinality categorical column into a numerical representation. # MAGIC", "is sampled from the validation set to explain.<br /> #", "Explainer. Increase the sample size to reduce variance. train_sample =", "# MAGIC %md # MAGIC ### Select supported columns #", "# MAGIC Select only the columns that are supported. 
This", "}/model\" # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC", "order of # MAGIC importance, and impact/color describe the correlation", "You can set the flag defined below to `shap_enabled =", "# COMMAND ---------- # MAGIC %md # MAGIC ## Train", "split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) #", "X_test, y_test, prefix=\"test_\") # Display the logged metrics xgbc_val_metrics =", "(\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col]))", "folder by selecting **File > Clone** in the notebook toolbar.", "below show how to add the model trained in this", "sklearn.preprocessing import StandardScaler standardizer = StandardScaler() # COMMAND ---------- #", "# MAGIC model.predict(input_X) # MAGIC ``` # MAGIC # MAGIC", "Split out train data X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X,", "col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ]) # Create", "---------- # MAGIC %md # MAGIC ## Load Data #", "trial to the MLflow experiment # MAGIC - To view", "vector is equal to 1024. Each column corresponds to one", "\"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ---------- # MAGIC %md #", "columns # COMMAND ---------- # MAGIC %md # MAGIC ####", "'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens", "Convert each feature to a fixed-length vector using TF-IDF vectorization.", "import uuid import yaml None import xgboost from mlflow.tracking import", "# MAGIC model_version = registered_model_version.version # MAGIC # MAGIC model", "column is hashed to 1024 float columns. 
# MAGIC Each", "Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched the", "= MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path,", "the model registry and to retrieve it later for inference.", "MAGIC ### Select supported columns # MAGIC Select only the", "# MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ``` #", "= registered_model_version.version # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") #", "multiple binary columns through one-hot encoding. # MAGIC For each", "= f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name)", "where teams can share ML models, work together from experimentation", "# To enable SHAP to succeed, both the background data", "log a different trial to the MLflow experiment # MAGIC", "parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using", "source # MAGIC %md # MAGIC # XGBoost training #", "results, attach this notebook to the **10-3-ML-Cluster** cluster and rerun", "import yaml None import xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir", "data distribution. # MAGIC # MAGIC For more information on", "'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are coming to invade", "the dataset used to train the model) # MAGIC -", "match the actual data distribution. 
# MAGIC # MAGIC For", "This affects the computed # MAGIC SHAP values, as the", "experiment # MAGIC - To view the full list of", "FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\",", "mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") # Display the logged metrics xgbc_val_metrics", "%md # MAGIC # XGBoost training # MAGIC This is", "add the model trained in this notebook to the model", "MAGIC ``` # MAGIC # MAGIC ### Load model without", "trained in this notebook to the model registry and to", "for inference. # MAGIC # MAGIC > **NOTE:** The `model_uri`", "teams can share ML models, work together from experimentation to", "len(X_train.index))).fillna(mode) # Sample a single example from the validation set", "page for details on why these columns are dropped. #", "MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the model parameters and", "= pipeline.fit_transform(X_val, y_val) model # COMMAND ---------- # Enable automatic", "'hasImage': {0: 1.0}} df = pd.DataFrame(data=data) df.head() # COMMAND ----------", "os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and read", "StandardScaler() # COMMAND ---------- # MAGIC %md # MAGIC ##", "unique values in the input column. 
# COMMAND ---------- from", "MAGIC - Log relevant metrics to MLflow to track runs", "subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\",", "your dataset has any, both the background data and #", "with open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir)", "# MAGIC - Validation (20% of the dataset used to", "databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\",", "}/model\") # COMMAND ---------- # MAGIC %md # MAGIC ###", "# COMMAND ---------- # Patch requisite packages to the model", "# MAGIC ## Feature importance # MAGIC # MAGIC SHAP", "features and model output. Features are ranked in descending order", "and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train,", "---------- # Enable automatic logging of input samples, metrics, parameters,", "import Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6,", "explainer = KernelExplainer(predict, train_sample, link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values,", "documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). 
# COMMAND ---------- # Set this flag to True", "# COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute import", "to report the true performance of the model on an", "split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining data equally for", "a summary plot # MAGIC of the relationship between features", "(\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val)", "the flag defined below to `shap_enabled = True` and re-run", "l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- # MAGIC %md", "experimentation to online testing and production, integrate with approval and", "MAGIC ### Text features # MAGIC Convert each feature to", "split_y = df_loaded[target_col] # Split out train data X_train, split_X_rem,", "the feature and the target variable. # MAGIC - Generating", "cannot explain models using data with nulls. # To enable", "categorical column (string or numeric), the number of output columns", "To reduce the computational overhead of each trial, a single", "in the pipelines. See the Alerts tab of the AutoML", "the notebook toolbar. # MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_", "coming to invade earth'}, 'hasImage': {0: 1.0}} df = pd.DataFrame(data=data)", "mlflow import databricks.automl_runtime # Use MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\")", "rerun for more thorough results. 
example = X_val.sample(n=1).fillna(mode) # Use", "print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND ---------- # MAGIC %md #", "---------- from mlflow.tracking import MlflowClient import os import uuid import", "the model) # MAGIC - Test (20% of the dataset", "(\"classifier\", xgbc_classifier), ]) # Create a separate pipeline to transform", "strategy=\"constant\", fill_value=\"\")), # Reshape to 1D since SimpleImputer changes the", "data equally for validation and test X_val, X_test, y_val, y_test", "are dropped. # COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols", "invade earth'}, 'text_without_stopwords': {0: 'aliens are coming to invade earth'},", "display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND ---------- # Patch requisite", "of input samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with", "categorical column into multiple binary columns through one-hot encoding. 
#", "metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run:", "%md # MAGIC ## Preprocessors # COMMAND ---------- transformers =", "the hyperparameters of the model) # MAGIC - Test (20%", "MAGIC running out of memory, we disable SHAP by default.<br", "KernelExplainer(predict, train_sample, link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_)", "# MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC #", "from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\",", "output columns is equal to the number of unique values", "stratify=split_y_rem) # COMMAND ---------- # MAGIC %md # MAGIC ##", "MAGIC ## Feature importance # MAGIC # MAGIC SHAP is", "notebook into your project folder by selecting **File > Clone**", "Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can share ML", "already trained in this notebook can be found in the", "of the top word n-grams # MAGIC where n is", "# MAGIC %md # MAGIC ## Train classification model #", "feature columns to be centered around zero with unit variance.", "medium-cardinality categorical column into a numerical representation. 
# MAGIC Each", "MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str", "(\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature]))", "MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC", "train_sample, link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) #", "# Training metrics are logged by MLflow autologging # Log", "\"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ---------- #", "for early stopping. pipeline = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor),", "using the Experiments UI, this link isn't very useful.) #", "selecting **File > Clone** in the notebook toolbar. # MAGIC", "- Clone this notebook into your project folder by selecting", "# MAGIC %md # MAGIC ## Load Data # COMMAND", "by selecting **File > Clone** in the notebook toolbar. #", "flag to True and re-run the notebook to see the", "to the number of unique values in the input column.", "]) # Create a separate pipeline to transform the validation", "values in the input column. # COMMAND ---------- from sklearn.pipeline", "n is in the range [1, 2]. # COMMAND ----------", "\"Example\" # MAGIC model_version = registered_model_version.version # MAGIC # MAGIC", "to tune the hyperparameters of the model) # MAGIC -", "set to explain. Increase the sample size and rerun for", "of the relationship between features and model output. 
Features are", "examples to explain will be imputed using the mode (most", "to reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a", "# Reshape to 1D since SimpleImputer changes the shape of", "validation and test X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem,", "# MAGIC %md # MAGIC ### Loading model to make", "= os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and", "both the background data and examples to explain are imputed", "'2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming to invade earth'}, 'text':", "explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- # MAGIC", "since SimpleImputer changes the shape of the input to 2D", "# MAGIC # MAGIC > **NOTE:** The `model_uri` for the", "one-hot encoding. # MAGIC For each input categorical column (string", "COMMAND ---------- # MAGIC %md # MAGIC ## Preprocessors #", "game-theoretic approach to explain machine learning models, providing a summary", "# MAGIC - Clone this notebook into your project folder", "list of tunable hyperparameters, check the output of the cell", "from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder =", "from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing", "%md # MAGIC ## Inference # MAGIC [The MLflow Model", "learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model", "COMMAND ---------- from mlflow.tracking import MlflowClient import os import uuid", "as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") 
shutil.rmtree(xgbc_temp_dir) # COMMAND ----------", "%md # MAGIC ### Feature standardization # MAGIC Scale all", "xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with", "a single example is sampled from the validation set to", "and their performance. The snippets below show how to add", "\"training_data\")) # Delete the temp data shutil.rmtree(input_temp_dir) # Preview data", "from the validation set to explain. Increase the sample size", "(\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2), max_features=1024))])", "this notebook to the model registry and to retrieve it", "from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline for feature", "input to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range =", "rerun it. # MAGIC - Compare trials in the [MLflow", "---------- # MAGIC %md # MAGIC #### Low-cardinality categoricals #", "and impact/color describe the correlation between the feature and the", "COMMAND ---------- # MAGIC %md # MAGIC ## Load Data", "sample size of explanations, or provide your own examples to", "a game-theoretic approach to explain machine learning models, providing a", "single example is sampled from the validation set to explain.<br", "are dropped in the pipelines. See the Alerts tab of", "columns. 
# MAGIC Each numeric column is imputed with zeros.", "COMMAND ---------- # Set this flag to True and re-run", "import KernelExplainer, summary_plot # SHAP cannot explain models using data", "= Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True)", "model_name) # MAGIC ``` # MAGIC # MAGIC ### Load", "Training metrics are logged by MLflow autologging # Log metrics", "import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND ----------", "= X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to explain feature importance", "import databricks.automl_runtime # Use MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col", "str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir)", "import XGBClassifier help(XGBClassifier) # COMMAND ---------- import mlflow import sklearn", "COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\",", "random_state=799811440, ) model = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\",", "and monitor ML deployments and their performance. The snippets below", "mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") # Log metrics for the test", "impact/color describe the correlation between the feature and the target", "reproduce these results, attach this notebook to the **10-3-ML-Cluster** cluster", "# COMMAND ---------- # MAGIC %md # MAGIC #### Low-cardinality", "Increase the sample size to reduce variance. 
train_sample = X_train.sample(n=min(100,", "# MAGIC model_name = \"Example\" # MAGIC model_version = registered_model_version.version", "any, both the background data and # MAGIC examples to", "import xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8])", "make prediction # COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model =", "xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str =", "Feature standardization # MAGIC Scale all feature columns to be", "set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") # Display the", "databricks.automl_runtime # Use MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col =", "Sample background data for SHAP Explainer. Increase the sample size", "%md # MAGIC #### Low-cardinality categoricals # MAGIC Convert each", "extra columns that are not used in training. # MAGIC", "# COMMAND ---------- from mlflow.tracking import MlflowClient import os import", "values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ---------- # Set", "from the validation set to explain.<br /> # MAGIC For", "this link isn't very useful.) # MAGIC - Clone this", "vectorization. The length of the output # MAGIC vector is", "model output. Features are ranked in descending order of #", "### Load from Model Registry # MAGIC ``` # MAGIC", "example from the validation set. 
predict = lambda x: model.predict_proba(pd.DataFrame(x,", "SimpleImputer from sklearn.pipeline import Pipeline for feature in [\"text\", \"main_img_url\"]:", "open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as", "model.predict(input_X) # MAGIC ``` # COMMAND ---------- # model_uri for", "by default.<br /> # MAGIC You can set the flag", "df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC %md # MAGIC ###", "### Feature standardization # MAGIC Scale all feature columns to", "min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model = Pipeline([", "explain are imputed with the mode (most frequent values). mode", "shap_enabled = True # COMMAND ---------- if shap_enabled: from shap", "# MAGIC %md # MAGIC ### Text features # MAGIC", "- Test Split # MAGIC Split the input data into", "= df_loaded[target_col] # Split out train data X_train, split_X_rem, y_train,", "(20% of the dataset used to report the true performance", "overhead of each trial, a single example is sampled from", "succeed, both the background data and examples to explain are", "reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single", "predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample,", "later for inference. 
# MAGIC # MAGIC > **NOTE:** The", "col in {'type', 'author'}: vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\",", "df = pd.DataFrame(data=data) df.head() # COMMAND ---------- model.predict(df) # COMMAND", "# MAGIC %md # MAGIC #### Medium-cardinality categoricals # MAGIC", "[here](#notebook/406583024052798) (If you launched the AutoML experiment using the Experiments", "to 1024 float columns. # MAGIC Each numeric column is", "explain models using data with nulls. # To enable SHAP", "= StandardScaler() # COMMAND ---------- # MAGIC %md # MAGIC", "earth'}, 'hasImage': {0: 1.0}} df = pd.DataFrame(data=data) df.head() # COMMAND", "from the validation set. predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns))", "the number of output columns is equal to the number", "models, work together from experimentation to online testing and production,", "# MAGIC > **NOTE:** The `model_uri` for the model already", "# MAGIC ### Load model without registering # MAGIC ```", "Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import databricks.automl_runtime", "= (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND ----------", "[col])) # COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor =", "to explain machine learning models, providing a summary plot #", "MAGIC # MAGIC ### Load from Model Registry # MAGIC", "categorical column into a numerical representation. # MAGIC Each string", "notebook to see the SHAP plots. # MAGIC - To", "from sklearn.preprocessing import FunctionTransformer for col in {'type', 'author'}: vectorizer", "why these columns are dropped. 
# COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector", "a dataset that has extra columns that are not used", "import shutil import uuid import yaml None import xgboost from", "os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str", "that can predict on a dataset that has extra columns", "= MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path)", "## Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a", "to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" # COMMAND ----------", "in descending order of # MAGIC importance, and impact/color describe", "/> # MAGIC For more thorough results, increase the sample", "{0: 'aliens are coming to invade earth'}, 'hasImage': {0: 1.0}}", "models using data with nulls. # To enable SHAP to", "import mlflow import sklearn from sklearn import set_config from sklearn.pipeline", "intensive operation, so to ensure that AutoML can run trials", "a numerical representation. # MAGIC Each string column is hashed", "[this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the model parameters", "variable. # MAGIC - Generating SHAP feature importance is a", "cell below # MAGIC # MAGIC ### Register to Model", "# Enable automatic logging of input samples, metrics, parameters, and", "the target variable. 
# MAGIC - Generating SHAP feature importance", "describe the correlation between the feature and the target variable.", "and re-run the training cell to log a different trial", "of the output # MAGIC vector is equal to 1024.", "frequent values). This affects the computed # MAGIC SHAP values,", "input_client = MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded =", "Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape to 1D since", "workflows, and monitor ML deployments and their performance. The snippets", "COMMAND ---------- # MAGIC %md # MAGIC ### Text features", "# Display the logged metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"): v", "---------- df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC %md # MAGIC", "True and re-run the notebook to see the SHAP plots", "you launched the AutoML experiment using the Experiments UI, this", "data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict()", "without registering # MAGIC ``` # MAGIC model_uri = f\"runs:/{", "that are supported. This allows us to train a model", "and the target variable. 
# MAGIC - Generating SHAP feature", "#### Low-cardinality categoricals # MAGIC Convert each low-cardinality categorical column", "MAGIC - Validation (20% of the dataset used to tune", "sparse_threshold=0) # COMMAND ---------- # MAGIC %md # MAGIC ###", "str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and read it into", "from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) #", "Split # MAGIC Split the input data into 3 sets:", "preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ]) # Create a separate", "'aliens are coming to invade earth'}, 'text': {0: 'aliens are", "'aliens are coming to invade earth'}, 'language': {0: 'english'}, 'site_url':", "SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) #", "hyperparameters, check the output of the cell below # COMMAND", "# MAGIC Each numeric column is imputed with zeros. #", "# MAGIC %md # MAGIC ## Preprocessors # COMMAND ----------", "MAGIC of the relationship between features and model output. Features", "COMMAND ---------- # Patch requisite packages to the model environment", "For more information on how to read Shapley values, see", "# MAGIC %md # MAGIC ## Train - Validation -", "# Use Kernel SHAP to explain feature importance on the", "runs # MAGIC - All the runs are logged under", "column into a numerical representation. 
# MAGIC Each string column", "computed # MAGIC SHAP values, as the imputed samples may", "Patch requisite packages to the model environment YAML for model", "prefix=\"test_\") # Display the logged metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"):", "MAGIC ``` # MAGIC # MAGIC ### Load from Model", "to explain are imputed with the mode (most frequent values).", "To reproduce these results, attach this notebook to the **10-3-ML-Cluster**", "report the true performance of the model on an unseen", "for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) #", "allows us to train a model that can predict on", "data with nulls. # To enable SHAP to succeed, both", "= open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\")", "# MAGIC SHAP values, as the imputed samples may not", "integrate with approval and governance workflows, and monitor ML deployments", "the model) # MAGIC - Validation (20% of the dataset", "COMMAND ---------- df_loaded.columns # COMMAND ---------- from sklearn.model_selection import train_test_split", "Train classification model # MAGIC - Log relevant metrics to", "# MAGIC You can set the flag defined below to", "MAGIC examples to explain will be imputed using the mode", "COMMAND ---------- from xgboost import XGBClassifier help(XGBClassifier) # COMMAND ----------", "np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import SimpleImputer from", "toolbar. # MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND", "MAGIC Scale all feature columns to be centered around zero", "the output # MAGIC vector is equal to 1024. Each", "the model parameters and re-run the training cell to log", "monitor ML deployments and their performance. 
The snippets below show", "not match the actual data distribution. # MAGIC # MAGIC", "COMMAND ---------- # MAGIC %md # MAGIC #### Medium-cardinality categoricals", "StandardScaler standardizer = StandardScaler() # COMMAND ---------- # MAGIC %md", "pandas DataFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir)", "import Pipeline for feature in [\"text\", \"main_img_url\"]: hash_transformer = Pipeline(steps=[", "the relationship between features and model output. Features are ranked", "to invade earth'}, 'text': {0: 'aliens are coming to invade", "train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single example from", "= [] # COMMAND ---------- # MAGIC %md # MAGIC", "model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC # MAGIC model", "mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are", "'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'},", "### Loading model to make prediction # COMMAND ---------- model_uri", "MAGIC %md # MAGIC ## Feature importance # MAGIC #", "---------- # MAGIC %md # MAGIC ### Feature standardization #", "found in the cell below # MAGIC # MAGIC ###", "MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\"))", "explanations, or provide your own examples to explain. 
# MAGIC", "---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder", "random_state=799811440, stratify=split_y) # Split remaining data equally for validation and", "feature importance on the example from the validation set. predict", "- To reduce the computational overhead of each trial, a", "train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining data equally", "'title': {0: 'aliens are coming to invade earth'}, 'text': {0:", "the model already trained in this notebook can be found", "the AutoML Experiment page for details on why these columns", "shape of the input to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\",", "the logged metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"): v for k,", "each low-cardinality categorical column into multiple binary columns through one-hot", "to succeed, both the background data and examples to explain", "to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1,", "COMMAND ---------- # Enable automatic logging of input samples, metrics,", "v for k, v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"):", "SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape to 1D since SimpleImputer changes", "ensure that AutoML can run trials without # MAGIC running", "> **NOTE:** The `model_uri` for the model already trained in", "below # COMMAND ---------- from xgboost import XGBClassifier help(XGBClassifier) #", "to train a model that can predict on a dataset", "import MlflowClient import os import uuid import shutil import pandas", "MAGIC %md # MAGIC ## Preprocessors # COMMAND ---------- transformers", "xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") # Display the logged", "are coming to 
invade earth'}, 'text': {0: 'aliens are coming", "{k.replace(\"test_\", \"\"): v for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics],", "operation, so to ensure that AutoML can run trials without", "# MAGIC Scale all feature columns to be centered around", "# MAGIC Convert each feature to a fixed-length vector using", "the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") #", "sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import", "y_test, prefix=\"test_\") # Display the logged metrics xgbc_val_metrics = {k.replace(\"val_\",", "(\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed =", "MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC # MAGIC", "MAGIC - To reduce the computational overhead of each trial,", "column corresponds to one of the top word n-grams #", "# MAGIC - Change the model parameters and re-run the", "= os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id,", "\"test\"])) # COMMAND ---------- # Patch requisite packages to the", "Create temp directory to download input data from MLflow input_temp_dir", "(\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ]) # Create a", "using the mode (most frequent values). This affects the computed", "= X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single example from the", "SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer for", "from Model Registry # MAGIC ``` # MAGIC model_name =", "and re-run this notebook to see the SHAP plots. 
#", "split_X = df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] # Split out", "for more thorough results. example = X_val.sample(n=1).fillna(mode) # Use Kernel", "None import xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"],", "the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ---------- # Set this flag", "``` # MAGIC # MAGIC ### Load model without registering", "---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND", "from sklearn.pipeline import Pipeline for feature in [\"text\", \"main_img_url\"]: hash_transformer", "the input to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range", "the dataset used to tune the hyperparameters of the model)", "xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND ---------- # Patch requisite packages", "MAGIC %md # MAGIC ## Load Data # COMMAND ----------", "remainder=\"passthrough\", sparse_threshold=0) # COMMAND ---------- # MAGIC %md # MAGIC", "Registry # MAGIC ``` # MAGIC model_name = \"Example\" #", "to explain feature importance on the example from the validation", "Validation - Test Split # MAGIC Split the input data", "is used for early stopping. pipeline = Pipeline([ (\"column_selector\", col_selector),", "of the dataset used to report the true performance of", "# MAGIC # MAGIC ### Load model without registering #", "# COMMAND ---------- import numpy as np from sklearn.feature_extraction.text import", "very useful.) # MAGIC - Clone this notebook into your", "= X_train.mode().iloc[0] # Sample background data for SHAP Explainer. Increase", "# Sample background data for SHAP Explainer. Increase the sample", "range [1, 2]. 
# COMMAND ---------- import numpy as np", "on an unseen dataset) # COMMAND ---------- df_loaded.columns # COMMAND", "MAGIC # XGBoost training # MAGIC This is an auto-generated", "transformers = [] # COMMAND ---------- # MAGIC %md #", "= mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC # MAGIC", "\"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND", "requisite packages to the model environment YAML for model serving", "disable SHAP by default.<br /> # MAGIC You can set", "import SimpleImputer from sklearn.pipeline import Pipeline for feature in [\"text\",", "Experiments UI, this link isn't very useful.) # MAGIC -", "shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() #", "= XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0,", "feature in [\"text\", \"main_img_url\"]: hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\",", "This is an auto-generated notebook. 
To reproduce these results, attach", "import train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] #", "X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single example from the validation", "verbosity=0, random_state=799811440, ) model = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor),", "importance is a very memory intensive operation, so to ensure", "in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND ---------- #", "coming to invade earth'}, 'text': {0: 'aliens are coming to", "# Use MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\"", "COMMAND ---------- # model_uri for the generated model print(f\"runs:/{ mlflow_run.info.run_id", "model registry and to retrieve it later for inference. #", "[MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the parent notebook", "df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the temp data shutil.rmtree(input_temp_dir)", "SimpleImputer changes the shape of the input to 2D (\"reshape\",", "Low-cardinality categoricals # MAGIC Convert each low-cardinality categorical column into", "# MAGIC # MAGIC ### Load from Model Registry #", "AutoML experiment using the Experiments UI, this link isn't very", "of explanations, or provide your own examples to explain. 
#", "xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\")", "MAGIC For each input categorical column (string or numeric), the", "import pandas as pd # Create temp directory to download", "mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC # MAGIC ###", "to invade earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url':", "summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- # MAGIC %md #", "training. # MAGIC `[]` are dropped in the pipelines. See", "in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"): v for k, v", "results. example = X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to explain", "{0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming", "classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are logged by MLflow", "into a numerical representation. # MAGIC Each string column is", "# Databricks notebook source # MAGIC %md # MAGIC #", "supported_cols = [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\",", "Each string column is hashed to 1024 float columns. #", "data for SHAP Explainer. 
Increase the sample size to reduce", "# Download the artifact and read it into a pandas", "For each input categorical column (string or numeric), the number", "help(XGBClassifier) # COMMAND ---------- import mlflow import sklearn from sklearn", "importance # MAGIC # MAGIC SHAP is a game-theoretic approach", "os import uuid import shutil import pandas as pd #", "COMMAND ---------- transformers = [] # COMMAND ---------- # MAGIC", "MAGIC # MAGIC ### Register to Model Registry # MAGIC", "MAGIC ## Load Data # COMMAND ---------- from mlflow.tracking import", "# COMMAND ---------- from xgboost import XGBClassifier help(XGBClassifier) # COMMAND", "# COMMAND ---------- # MAGIC %md # MAGIC ### Categorical", "1024. Each column corresponds to one of the top word", "\"text_without_stopwords\"])) # COMMAND ---------- # MAGIC %md # MAGIC ####", "MAGIC %md # MAGIC ## Inference # MAGIC [The MLflow", "a different trial to the MLflow experiment # MAGIC -", "(20% of the dataset used to tune the hyperparameters of", "model already trained in this notebook can be found in", "an auto-generated notebook. To reproduce these results, attach this notebook", "model to make prediction # COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\"", "import ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\",", "SHAP plots shap_enabled = True # COMMAND ---------- if shap_enabled:", "for details on why these columns are dropped. # COMMAND", "#### Medium-cardinality categoricals # MAGIC Convert each medium-cardinality categorical column", "registry and to retrieve it later for inference. # MAGIC", "3 sets: # MAGIC - Train (60% of the dataset", "isn't very useful.) 
# MAGIC - Clone this notebook into", "preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND ---------- # MAGIC", "/> # MAGIC You can set the flag defined below", "# COMMAND ---------- # Enable automatic logging of input samples,", "mlflow import sklearn from sklearn import set_config from sklearn.pipeline import", "COMMAND ---------- # MAGIC %md # MAGIC ### Categorical columns", "to ensure that AutoML can run trials without # MAGIC", "input data into 3 sets: # MAGIC - Train (60%", "pandas as pd data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0:", "to track runs # MAGIC - All the runs are", "attach this notebook to the **10-3-ML-Cluster** cluster and rerun it.", "2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND ---------- from sklearn.compose", "from sklearn import set_config from sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier", "n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model = Pipeline([ (\"column_selector\", col_selector),", "this notebook to see the SHAP plots. 
# MAGIC -", "data from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) #", "os import shutil import uuid import yaml None import xgboost", "example, class_names=model.classes_) # COMMAND ---------- # MAGIC %md # MAGIC", "---------- # model_uri for the generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\")", "\"text\", \"title\", \"type\", \"author\"] col_selector = ColumnSelector(supported_cols) # COMMAND ----------", "be found in the cell below # MAGIC # MAGIC", "MAGIC %md # MAGIC ### Feature standardization # MAGIC Scale", "from sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y =", "from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer for col", "set_config from sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649,", "train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ---------- # MAGIC", "the model on an unseen dataset) # COMMAND ---------- df_loaded.columns", "MAGIC - Generating SHAP feature importance is a very memory", "mode (most frequent values). mode = X_train.mode().iloc[0] # Sample background", "of the model) # MAGIC - Test (20% of the", "of the model on an unseen dataset) # COMMAND ----------", "(If you launched the AutoML experiment using the Experiments UI,", "MAGIC %md # MAGIC ## Train classification model # MAGIC", "data and examples to explain are imputed with the mode", "## Load Data # COMMAND ---------- from mlflow.tracking import MlflowClient", "MAGIC %md # MAGIC ### Text features # MAGIC Convert", "(\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ]) # Create a separate pipeline", "SHAP Explainer. Increase the sample size to reduce variance. 
train_sample", "prediction # COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri)", "are logged by MLflow autologging # Log metrics for the", "logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the", "xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") # Log metrics for", "is a game-theoretic approach to explain machine learning models, providing", "the notebook to see the SHAP plots shap_enabled = True", "remaining data equally for validation and test X_val, X_test, y_val,", "shap_enabled: from shap import KernelExplainer, summary_plot # SHAP cannot explain", "COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer", "To view the full list of tunable hyperparameters, check the", "MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC registered_model_version =", "- SHAP cannot explain models using data with nulls; if", "]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model # COMMAND ----------", "# MAGIC %md # MAGIC ## Inference # MAGIC [The", "correlation between the feature and the target variable. 
# MAGIC", "# MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X)", "model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC registered_model_version = mlflow.register_model(model_uri,", "ngram_range = (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND", "model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are logged", "# Preview data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND", "model_version = registered_model_version.version # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\")", "MAGIC This is an auto-generated notebook. To reproduce these results,", "(string or numeric), the number of output columns is equal", "YAML for model serving import os import shutil import uuid", "earth'}, 'text': {0: 'aliens are coming to invade earth'}, 'language':", "[SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ---------- # Set this flag to", "preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model", "sets: # MAGIC - Train (60% of the dataset used", "- Navigate to the parent notebook [here](#notebook/406583024052798) (If you launched", "of output columns is equal to the number of unique", "low-cardinality categorical column into multiple binary columns through one-hot encoding.", "Create a separate pipeline to transform the validation dataset. 
This", "# MAGIC # XGBoost training # MAGIC This is an", "AutoML can run trials without # MAGIC running out of", "# XGBoost training # MAGIC This is an auto-generated notebook.", "COMMAND ---------- if shap_enabled: from shap import KernelExplainer, summary_plot #", "predict on a dataset that has extra columns that are", "# COMMAND ---------- df_loaded.columns # COMMAND ---------- from sklearn.model_selection import", "See the Alerts tab of the AutoML Experiment page for", "the Alerts tab of the AutoML Experiment page for details", "model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ----------", "# MAGIC ## Train classification model # MAGIC - Log", "# MAGIC For more thorough results, increase the sample size", "validation set. predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer =", "'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming to invade", "below # MAGIC # MAGIC ### Register to Model Registry", "Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate", "# Sample a single example from the validation set to", "col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val,", "to retrieve it later for inference. 
# MAGIC # MAGIC", "hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))])", "# MAGIC importance, and impact/color describe the correlation between the", "# MAGIC ``` # MAGIC # MAGIC ### Load model", "classifier__verbose=False) # Training metrics are logged by MLflow autologging #", "MAGIC ``` # MAGIC model_name = \"Example\" # MAGIC #", "os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\",", "input categorical column (string or numeric), the number of output", "is a collaborative hub where teams can share ML models,", "standardizer), (\"classifier\", xgbc_classifier), ]) # Create a separate pipeline to", "# MAGIC # MAGIC For more information on how to", "representation. # MAGIC Each string column is hashed to 1024", "max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND ---------- from sklearn.compose import", "to MLflow to track runs # MAGIC - All the", "mode (most frequent values). This affects the computed # MAGIC", "read it into a pandas DataFrame input_client = MlflowClient() input_data_path", "Convert each low-cardinality categorical column into multiple binary columns through", "cluster and rerun it. # MAGIC - Compare trials in", "model serving import os import shutil import uuid import yaml", "from mlflow.tracking import MlflowClient import os import uuid import shutil", "yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id,", "to explain. 
Increase the sample size and rerun for more", "# MAGIC ## Load Data # COMMAND ---------- from mlflow.tracking", "values). This affects the computed # MAGIC SHAP values, as", "Use Kernel SHAP to explain feature importance on the example", "MAGIC Convert each medium-cardinality categorical column into a numerical representation.", "# COMMAND ---------- # MAGIC %md # MAGIC ### Loading", "# COMMAND ---------- transformers = [] # COMMAND ---------- #", "test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ---------- # MAGIC %md #", "fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND ----------", "that has extra columns that are not used in training.", "size of explanations, or provide your own examples to explain.", "in this notebook to the model registry and to retrieve", "top word n-grams # MAGIC where n is in the", "ML models, work together from experimentation to online testing and", "- Log relevant metrics to MLflow to track runs #", "this notebook to the **10-3-ML-Cluster** cluster and rerun it. #", "yaml None import xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir =", "if your dataset has any, both the background data and", "each feature to a fixed-length vector using TF-IDF vectorization. The", "example = X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to explain feature", "ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md # MAGIC ##", "UI, this link isn't very useful.) # MAGIC - Clone", "1.0}} df = pd.DataFrame(data=data) df.head() # COMMAND ---------- model.predict(df) #", "MAGIC SHAP values, as the imputed samples may not match", "to `shap_enabled = True` and re-run this notebook to see", "unit variance. # COMMAND ---------- from sklearn.preprocessing import StandardScaler standardizer", "# Create temp directory to download input data from MLflow", "hashed to 1024 float columns. 
# MAGIC Each numeric column", "---------- import pandas as pd data = {'author': {0: '<EMAIL>jim.<EMAIL>'},", "MAGIC # MAGIC For more information on how to read", "to explain. # MAGIC - SHAP cannot explain models using", "length of the output # MAGIC vector is equal to", "in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the", "to log a different trial to the MLflow experiment #", "= {k.replace(\"val_\", \"\"): v for k, v in xgbc_val_metrics.items()} xgbc_test_metrics", "f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) #", "`[]` are dropped in the pipelines. See the Alerts tab", "# MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow", "---------- # MAGIC %md # MAGIC ## Train classification model", "the validation set to explain. Increase the sample size and", "notebook source # MAGIC %md # MAGIC # XGBoost training", "model trained in this notebook to the model registry and", "X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y)", "is imputed with zeros. 
# COMMAND ---------- from sklearn.feature_extraction import", "Enable automatic logging of input samples, metrics, parameters, and models", "\"author\"] col_selector = ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md", "# COMMAND ---------- # MAGIC %md # MAGIC ## Load", "# MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub", "y_val, prefix=\"val_\") # Log metrics for the test set xgbc_test_metrics", "(\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model #", "coming to invade earth'}, 'text_without_stopwords': {0: 'aliens are coming to", "to the **10-3-ML-Cluster** cluster and rerun it. # MAGIC -", "X_val, y_val, prefix=\"val_\") # Log metrics for the test set", "invade earth'}, 'hasImage': {0: 1.0}} df = pd.DataFrame(data=data) df.head() #", "testing and production, integrate with approval and governance workflows, and", "are coming to invade earth'}, 'hasImage': {0: 1.0}} df =", "# MAGIC model_name = \"Example\" # MAGIC # MAGIC model_uri", "MAGIC # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC", "word n-grams # MAGIC where n is in the range", "can run trials without # MAGIC running out of memory,", "MAGIC ### Loading model to make prediction # COMMAND ----------", "Clone** in the notebook toolbar. # MAGIC # MAGIC Runtime", "sklearn import set_config from sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier =", "MAGIC > **NOTE:** The `model_uri` for the model already trained", "provide your own examples to explain. # MAGIC - SHAP", "columns that are supported. This allows us to train a", "{0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are coming", "has extra columns that are not used in training. 
#", "# MAGIC ## Preprocessors # COMMAND ---------- transformers = []", "Preview data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ----------", "into multiple binary columns through one-hot encoding. # MAGIC For", "summary plot # MAGIC of the relationship between features and", "model print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND ---------- # MAGIC %md", "os.makedirs(input_temp_dir) # Download the artifact and read it into a", "strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND", "---------- from xgboost import XGBClassifier help(XGBClassifier) # COMMAND ---------- import", "= Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier),", "Train - Validation - Test Split # MAGIC Split the", "MAGIC %md # MAGIC #### Medium-cardinality categoricals # MAGIC Convert", "random_state=799811440, stratify=split_y_rem) # COMMAND ---------- # MAGIC %md # MAGIC", "= {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens", "(most frequent values). This affects the computed # MAGIC SHAP", "from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\",", "used to tune the hyperparameters of the model) # MAGIC", "MAGIC vector is equal to 1024. Each column corresponds to", "experiment using the Experiments UI, this link isn't very useful.)", "open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) #", "are supported. 
This allows us to train a model that", "own examples to explain. # MAGIC - SHAP cannot explain", "to the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML", "model_name = \"Example\" # MAGIC model_version = registered_model_version.version # MAGIC", "reduce the computational overhead of each trial, a single example", "parameters and re-run the training cell to log a different", "The length of the output # MAGIC vector is equal", "%md # MAGIC ## Train classification model # MAGIC -", "y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are logged by", "to invade earth'}, 'hasImage': {0: 1.0}} df = pd.DataFrame(data=data) df.head()", "MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams can", "max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model =", "to the model environment YAML for model serving import os", "sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\")", "- To view the full list of tunable hyperparameters, check", "# COMMAND ---------- # model_uri for the generated model print(f\"runs:/{", "changes the shape of the input to 2D (\"reshape\", FunctionTransformer(np.reshape,", "shap import KernelExplainer, summary_plot # SHAP cannot explain models using", "each trial, a single example is sampled from the validation", "MAGIC ## Train - Validation - Test Split # MAGIC", "#model.predict(input_X) # COMMAND ---------- import pandas as pd data =", "COMMAND ---------- from sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col], axis=1)", "'aliens are coming to invade earth'}, 'text_without_stopwords': {0: 'aliens are", "the output of the cell below # COMMAND ---------- from", "# 
MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If", "in [\"text\", \"main_img_url\"]: hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")),", "model # COMMAND ---------- # Enable automatic logging of input", "COMMAND ---------- import mlflow import databricks.automl_runtime # Use MLflow to", "COMMAND ---------- # MAGIC %md # MAGIC ### Select supported", "of tunable hyperparameters, check the output of the cell below", "dropped in the pipelines. See the Alerts tab of the", "sklearn from sklearn import set_config from sklearn.pipeline import Pipeline set_config(display=\"diagram\")", "for the generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND ----------", "be imputed using the mode (most frequent values). This affects", "input column. # COMMAND ---------- from sklearn.pipeline import Pipeline from", "SHAP by default.<br /> # MAGIC You can set the", "examples to explain are imputed with the mode (most frequent", "silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False)", "between the feature and the target variable. # MAGIC -", "through one-hot encoding. 
# MAGIC For each input categorical column", "Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ])", "# COMMAND ---------- # MAGIC %md # MAGIC ### Text", "from sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404,", "\"title\", \"type\", \"author\"] col_selector = ColumnSelector(supported_cols) # COMMAND ---------- #", "# MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC", "between features and model output. Features are ranked in descending", "experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" # COMMAND ---------- # MAGIC", "feature to a fixed-length vector using TF-IDF vectorization. The length", "MAGIC where n is in the range [1, 2]. #", "with nulls; if your dataset has any, both the background", "the top word n-grams # MAGIC where n is in", "dropped. 
# COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols =", "{'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are", "MAGIC # MAGIC SHAP is a game-theoretic approach to explain", "= mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import pandas as pd", "and # MAGIC examples to explain will be imputed using", "import Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\",", "= Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape to 1D", "with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) #", "explain machine learning models, providing a summary plot # MAGIC", "1D since SimpleImputer changes the shape of the input to", "to online testing and production, integrate with approval and governance", "zeros. # COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute", "in the notebook toolbar. # MAGIC # MAGIC Runtime Version:", "performance. The snippets below show how to add the model", "early stopping. pipeline = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\",", "= f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import", "and to retrieve it later for inference. 
# MAGIC #", "SHAP to succeed, both the background data and examples to", "ranked in descending order of # MAGIC importance, and impact/color", "online testing and production, integrate with approval and governance workflows,", "the AutoML experiment using the Experiments UI, this link isn't", "(most frequent values). mode = X_train.mode().iloc[0] # Sample background data", "registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ``` # MAGIC #", "in {'type', 'author'}: vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")),", "for col in {'type', 'author'}: vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None,", "providing a summary plot # MAGIC of the relationship between", "{0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming to invade earth'},", "dataset has any, both the background data and # MAGIC", "COMMAND ---------- # MAGIC %md # MAGIC ## Feature importance", "MAGIC - All the runs are logged under [this MLflow", "of the cell below # COMMAND ---------- from xgboost import", "import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient()", "without # MAGIC running out of memory, we disable SHAP", "set. predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict,", "pd # Create temp directory to download input data from", "---------- # MAGIC %md # MAGIC ## Inference # MAGIC", "---------- # MAGIC %md # MAGIC #### Medium-cardinality categoricals #", "vector using TF-IDF vectorization. 
The length of the output #", "memory intensive operation, so to ensure that AutoML can run", "MAGIC ## Train classification model # MAGIC - Log relevant", "or numeric), the number of output columns is equal to", "experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798)", "= train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ---------- #", "increase the sample size of explanations, or provide your own", "MAGIC Split the input data into 3 sets: # MAGIC", "MAGIC - Clone this notebook into your project folder by", "metrics are logged by MLflow autologging # Log metrics for", "columns that are not used in training. # MAGIC `[]`", "and production, integrate with approval and governance workflows, and monitor", "can set the flag defined below to `shap_enabled = True`", "---------- df_loaded.columns # COMMAND ---------- from sklearn.model_selection import train_test_split split_X", "MAGIC %md # MAGIC ### Loading model to make prediction", "MAGIC ### Load from Model Registry # MAGIC ``` #", "samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as", "model that can predict on a dataset that has extra", "vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape to", "in this notebook can be found in the cell below", "MAGIC ## Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is", "of the input to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\",", "the model environment YAML for model serving import os import", "'text_without_stopwords': {0: 'aliens are coming to invade earth'}, 
'hasImage': {0:", "[] # COMMAND ---------- # MAGIC %md # MAGIC ###", "data and # MAGIC examples to explain will be imputed", "= mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") # Display the logged metrics", "# Log metrics for the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model,", "# MAGIC - SHAP cannot explain models using data with", "# MAGIC running out of memory, we disable SHAP by", "MAGIC %md # MAGIC ## Train - Validation - Test", "fixed-length vector using TF-IDF vectorization. The length of the output", "for the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\")", "full list of tunable hyperparameters, check the output of the", "columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False)", "``` # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC", "example is sampled from the validation set to explain.<br />", "OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\",", "MAGIC #### Medium-cardinality categoricals # MAGIC Convert each medium-cardinality categorical", "# COMMAND ---------- from sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col],", "Feature importance # MAGIC # MAGIC SHAP is a game-theoretic", "sklearn.preprocessing import FunctionTransformer for col in {'type', 'author'}: vectorizer =", "target variable. 
# MAGIC - Generating SHAP feature importance is", "MAGIC ### Feature standardization # MAGIC Scale all feature columns", "# Set this flag to True and re-run the notebook", "\"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the temp", "X_val_processed = pipeline.fit_transform(X_val, y_val) model # COMMAND ---------- # Enable", "> Clone** in the notebook toolbar. # MAGIC # MAGIC", "input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the temp data", "pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the temp data shutil.rmtree(input_temp_dir) # Preview", "for SHAP Explainer. Increase the sample size to reduce variance.", "# MAGIC ### Select supported columns # MAGIC Select only", "pd data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title':", "The snippets below show how to add the model trained", "into 3 sets: # MAGIC - Train (60% of the", "string column is hashed to 1024 float columns. # MAGIC", "check the output of the cell below # COMMAND ----------", "import set_config from sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier(", "MAGIC # MAGIC > **NOTE:** The `model_uri` for the model", "so to ensure that AutoML can run trials without #", "transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) #", "is equal to the number of unique values in the", "inference. # MAGIC # MAGIC > **NOTE:** The `model_uri` for", "= f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC # MAGIC model =", "to a fixed-length vector using TF-IDF vectorization. 
The length of", "both the background data and # MAGIC examples to explain", "the validation set to explain.<br /> # MAGIC For more", "model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ``` #", "descending order of # MAGIC importance, and impact/color describe the", "collaborative hub where teams can share ML models, work together", "# MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" # MAGIC registered_model_version", "sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND", "---------- # MAGIC %md # MAGIC ### Text features #", "validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") # Log", "mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training", "---------- # MAGIC %md # MAGIC ### Select supported columns", "deployments and their performance. The snippets below show how to", "---------- # Set this flag to True and re-run the", "from sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline", "the range [1, 2]. 
# COMMAND ---------- import numpy as", "[\"published\", \"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ----------", "# COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers,", "MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import", "set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") # Log metrics", "this notebook can be found in the cell below #", "(1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND ---------- from", "- Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC -", "training cell to log a different trial to the MLflow", "hash_transformer, [feature])) # COMMAND ---------- # MAGIC %md # MAGIC", "# MAGIC ### Loading model to make prediction # COMMAND", "columns # MAGIC Select only the columns that are supported.", "colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, )", "MLflow autologging # Log metrics for the validation set xgbc_val_metrics", "\"\"): v for k, v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\",", "model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import pandas as", "a model that can predict on a dataset that has", "automatic logging of input samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True,", "track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" # COMMAND ---------- #", "Select supported columns # MAGIC Select only the columns that", "train the model) # MAGIC - 
Validation (20% of the", "a fixed-length vector using TF-IDF vectorization. The length of the", "validation set to explain.<br /> # MAGIC For more thorough", "registering # MAGIC ``` # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id", "generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND ---------- # MAGIC", "float columns. # MAGIC Each numeric column is imputed with", "mlflow.tracking import MlflowClient import os import uuid import shutil import", "Reshape to 1D since SimpleImputer changes the shape of the", "import SimpleImputer from sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer", "standardization # MAGIC Scale all feature columns to be centered", "re-run the training cell to log a different trial to", "for k, v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"): v", "[1, 2]. # COMMAND ---------- import numpy as np from", "SHAP is a game-theoretic approach to explain machine learning models,", "(f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND ---------- #", "MAGIC %md # MAGIC # XGBoost training # MAGIC This", "the training cell to log a different trial to the", "this notebook into your project folder by selecting **File >", "# MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) # MAGIC", "encoding. # MAGIC For each input categorical column (string or", "the input data into 3 sets: # MAGIC - Train", "example from the validation set to explain. 
Increase the sample", "# COMMAND ---------- import mlflow import databricks.automl_runtime # Use MLflow", "---------- from sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer from", "one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND", "input data from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir)", "out of memory, we disable SHAP by default.<br /> #", "on a dataset that has extra columns that are not", "{k.replace(\"val_\", \"\"): v for k, v in xgbc_val_metrics.items()} xgbc_test_metrics =", "\"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\")", "background data and examples to explain are imputed with the", "# MAGIC Convert each medium-cardinality categorical column into a numerical", "uuid import shutil import pandas as pd # Create temp", "is a very memory intensive operation, so to ensure that", "sample size to reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) #", "a single example from the validation set to explain. Increase", "# COMMAND ---------- import pandas as pd data = {'author':", "Each column corresponds to one of the top word n-grams", "MAGIC - SHAP cannot explain models using data with nulls;", "the number of unique values in the input column. #", "'title_without_stopwords': {0: 'aliens are coming to invade earth'}, 'text_without_stopwords': {0:", "background data and # MAGIC examples to explain will be", "the pipelines. 
See the Alerts tab of the AutoML Experiment", "COMMAND ---------- import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer", "single example from the validation set to explain. Increase the", "X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) #", "MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where", "mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client =", "Increase the sample size and rerun for more thorough results.", "TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer, [col])) #", "split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining", "share ML models, work together from experimentation to online testing", "## Train classification model # MAGIC - Log relevant metrics", "data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0:", "download input data from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8])", "MAGIC model.predict(input_X) # MAGIC ``` # COMMAND ---------- # model_uri", "the sample size of explanations, or provide your own examples", "input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND ---------- # MAGIC %md", "# COMMAND ---------- # Set this flag to True and", "'author'}: vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape", "# MAGIC For each input categorical column (string or numeric),", "of memory, we disable SHAP by default.<br /> # MAGIC", "shutil.rmtree(xgbc_temp_dir) # 
COMMAND ---------- # MAGIC %md # MAGIC ##", "in the cell below # MAGIC # MAGIC ### Register", "# COMMAND ---------- # MAGIC %md # MAGIC ### Feature", "serving import os import shutil import uuid import yaml None", "model) # MAGIC - Validation (20% of the dataset used", "# MAGIC %md # MAGIC #### Low-cardinality categoricals # MAGIC", "- Validation - Test Split # MAGIC Split the input", "model parameters and re-run the training cell to log a", "# MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X)", "and read it into a pandas DataFrame input_client = MlflowClient()", "your own examples to explain. # MAGIC - SHAP cannot", "packages to the model environment YAML for model serving import", "MAGIC %md # MAGIC ### Categorical columns # COMMAND ----------", "KernelExplainer, summary_plot # SHAP cannot explain models using data with", "# COMMAND ---------- if shap_enabled: from shap import KernelExplainer, summary_plot", "below to `shap_enabled = True` and re-run this notebook to", "notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment using the", "production, integrate with approval and governance workflows, and monitor ML", "{0: 1.0}} df = pd.DataFrame(data=data) df.head() # COMMAND ---------- model.predict(df)", "test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test, prefix=\"test_\") # Display", "MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path", "for validation and test X_val, X_test, y_val, y_test = train_test_split(split_X_rem,", "COMMAND ---------- import pandas as pd data = {'author': {0:", "Change the model parameters and re-run the training cell to", "df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC", "This allows us to train a model that can predict", "to 
True and re-run the notebook to see the SHAP", "[feature])) # COMMAND ---------- # MAGIC %md # MAGIC ###", "FunctionTransformer for col in {'type', 'author'}: vectorizer = Pipeline(steps=[ (\"imputer\",", "distribution. # MAGIC # MAGIC For more information on how", "# SHAP cannot explain models using data with nulls. #", "= \"Example\" # MAGIC # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id", "= df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] # Split out train", "the imputed samples may not match the actual data distribution.", "by MLflow autologging # Log metrics for the validation set", "Split remaining data equally for validation and test X_val, X_test,", "# MAGIC - All the runs are logged under [this", "import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\",", "launched the AutoML experiment using the Experiments UI, this link", "True` and re-run this notebook to see the SHAP plots.", "view the full list of tunable hyperparameters, check the output", "variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample a single example", "axis=1) split_y = df_loaded[target_col] # Split out train data X_train,", "MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) # MAGIC ```", "models using data with nulls; if your dataset has any,", "MAGIC model_version = registered_model_version.version # MAGIC # MAGIC model =", "to the MLflow experiment # MAGIC - To view the", "it. # MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)", "MAGIC `[]` are dropped in the pipelines. 
See the Alerts", "set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100,", "MLflow to track runs # MAGIC - All the runs", "show how to add the model trained in this notebook", "## Preprocessors # COMMAND ---------- transformers = [] # COMMAND", "from mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client", "# MAGIC %md # MAGIC ### Categorical columns # COMMAND", "machine learning models, providing a summary plot # MAGIC of", "MAGIC # MAGIC ### Load model without registering # MAGIC", "unseen dataset) # COMMAND ---------- df_loaded.columns # COMMAND ---------- from", "= Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\",", "MAGIC ### Register to Model Registry # MAGIC ``` #", "X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem)", "model_name = \"Example\" # MAGIC # MAGIC model_uri = f\"runs:/{", "numeric), the number of output columns is equal to the", "`shap_enabled = True` and re-run this notebook to see the", "Test Split # MAGIC Split the input data into 3", "columns to be centered around zero with unit variance. 
#", "models, providing a summary plot # MAGIC of the relationship", "%md # MAGIC ### Text features # MAGIC Convert each", "the full list of tunable hyperparameters, check the output of", "# Split remaining data equally for validation and test X_val,", "2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2),", "x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link=\"logit\") shap_values =", "- Train (60% of the dataset used to train the", "are ranked in descending order of # MAGIC importance, and", "Loading model to make prediction # COMMAND ---------- model_uri =", "the example from the validation set. predict = lambda x:", "coming to invade earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'},", "the SHAP plots. # MAGIC - To reduce the computational", "= yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str))", "DataFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded", "import os import uuid import shutil import pandas as pd", "= lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link=\"logit\")", "- All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false)", "plots. 
# MAGIC - To reduce the computational overhead of", "SHAP values, as the imputed samples may not match the", "pipeline = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ])", "n-grams # MAGIC where n is in the range [1,", "MAGIC You can set the flag defined below to `shap_enabled", "trial, a single example is sampled from the validation set", "earth'}, 'text_without_stopwords': {0: 'aliens are coming to invade earth'}, 'hasImage':", "%md # MAGIC ## Load Data # COMMAND ---------- from", "column is imputed with zeros. # COMMAND ---------- from sklearn.feature_extraction", "import TfidfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline", "MAGIC - Change the model parameters and re-run the training", "metrics for the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test, y_test,", "cell below # COMMAND ---------- from xgboost import XGBClassifier help(XGBClassifier)", "= input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete", "This is used for early stopping. pipeline = Pipeline([ (\"column_selector\",", "\"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"] col_selector", "'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are coming to", "using data with nulls. # To enable SHAP to succeed,", "**10-3-ML-Cluster** cluster and rerun it. 
# MAGIC - Compare trials", "k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND", "results, increase the sample size of explanations, or provide your", "import os import shutil import uuid import yaml None import", "not used in training. # MAGIC `[]` are dropped in", "to one of the top word n-grams # MAGIC where", "Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed", "track runs # MAGIC - All the runs are logged", "around zero with unit variance. # COMMAND ---------- from sklearn.preprocessing", "\"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"] col_selector = ColumnSelector(supported_cols)", "re-run the notebook to see the SHAP plots shap_enabled =", "\"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ---------- # MAGIC", "``` # MAGIC model_name = \"Example\" # MAGIC model_version =", "'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords': {0: 'aliens are", "## Feature importance # MAGIC # MAGIC SHAP is a", "from sklearn.preprocessing import StandardScaler standardizer = StandardScaler() # COMMAND ----------", "stratify=split_y) # Split remaining data equally for validation and test", "# MAGIC # MAGIC SHAP is a game-theoretic approach to", "MAGIC - Navigate to the parent notebook [here](#notebook/406583024052798) (If you", "**File > Clone** in the notebook toolbar. 
# MAGIC #", "import Pipeline from sklearn.preprocessing import FunctionTransformer for col in {'type',", "mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import pandas as pd data", "# MAGIC # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" #", "variance. # COMMAND ---------- from sklearn.preprocessing import StandardScaler standardizer =", "import sklearn from sklearn import set_config from sklearn.pipeline import Pipeline", "model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) # MAGIC ``` #", "model environment YAML for model serving import os import shutil", "Features are ranked in descending order of # MAGIC importance,", "# MAGIC ### Categorical columns # COMMAND ---------- # MAGIC", "transform the validation dataset. This is used for early stopping.", "Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106,", "import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute", "col_selector = ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC %md #", "the validation dataset. This is used for early stopping. pipeline", "COMMAND ---------- import mlflow import sklearn from sklearn import set_config", "MAGIC ### Load model without registering # MAGIC ``` #", "MAGIC model_name = \"Example\" # MAGIC model_version = registered_model_version.version #", "to 1D since SimpleImputer changes the shape of the input", "may not match the actual data distribution. 
# MAGIC #", "see the SHAP plots shap_enabled = True # COMMAND ----------", "snippets below show how to add the model trained in", "the background data and examples to explain are imputed with", "= explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ---------- #", "the parent notebook [here](#notebook/406583024052798) (If you launched the AutoML experiment", "temp directory to download input data from MLflow input_temp_dir =", "as pd data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'},", "importance on the example from the validation set. predict =", "X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to explain feature importance on", "n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440, ) model = Pipeline([ (\"column_selector\",", "SHAP plots. # MAGIC - To reduce the computational overhead", "only the columns that are supported. This allows us to", "be centered around zero with unit variance. # COMMAND ----------", "as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics", "information on how to read Shapley values, see the [SHAP", "samples may not match the actual data distribution. 
# MAGIC", "sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline import", "[\"text\", \"main_img_url\"]: hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\",", "each input categorical column (string or numeric), the number of", "\"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"])) # COMMAND ---------- # MAGIC %md", "Log relevant metrics to MLflow to track runs # MAGIC", "MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the", "COMMAND ---------- # MAGIC %md # MAGIC ### Feature standardization", "import StandardScaler standardizer = StandardScaler() # COMMAND ---------- # MAGIC", "the model trained in this notebook to the model registry", "values). mode = X_train.mode().iloc[0] # Sample background data for SHAP", "# MAGIC Each string column is hashed to 1024 float", "Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer,", "nulls; if your dataset has any, both the background data", "}/model\" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC ```", "to transform the validation dataset. 
This is used for early", "a pandas DataFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\",", "Delete the temp data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) #", "# MAGIC #### Medium-cardinality categoricals # MAGIC Convert each medium-cardinality", "can share ML models, work together from experimentation to online", "dataset used to tune the hyperparameters of the model) #", "standardizer), ]) mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model # COMMAND", "xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148,", "Inference # MAGIC [The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative", "---------- import numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from", "used for early stopping. pipeline = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\",", "set the flag defined below to `shap_enabled = True` and", "All the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) #", "f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) # COMMAND ---------- import pandas", "Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). 
# COMMAND ---------- #", "mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5, classifier__eval_set=[(X_val_processed,y_val)],", "Sample a single example from the validation set to explain.", "---------- # Patch requisite packages to the model environment YAML", "the generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND ---------- #", "target_col = \"label\" # COMMAND ---------- # MAGIC %md #", "transformers.append((f\"text_{col}\", vectorizer, [col])) # COMMAND ---------- from sklearn.compose import ColumnTransformer", "output. Features are ranked in descending order of # MAGIC", "Databricks notebook source # MAGIC %md # MAGIC # XGBoost", "Experiment page for details on why these columns are dropped.", "= ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND ---------- # MAGIC %md", "see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ---------- # Set this", "xgboost from mlflow.tracking import MlflowClient xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir)", "= {k.replace(\"test_\", \"\"): v for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics,", "artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- # MAGIC %md # MAGIC", "and examples to explain are imputed with the mode (most", "xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND ---------- # Patch", "more information on how to read Shapley values, see the", "MAGIC Select only the columns that are supported. 
This allows", "data X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440,", "to explain will be imputed using the mode (most frequent", "column (string or numeric), the number of output columns is", "Use MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" #", "the temp data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) # COMMAND", "xgbc_test_metrics = {k.replace(\"test_\", \"\"): v for k, v in xgbc_test_metrics.items()}", "= mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) # MAGIC ``` # MAGIC", "Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import databricks.automl_runtime #", "---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\", \"language\",", "standardizer = StandardScaler() # COMMAND ---------- # MAGIC %md #", "to see the SHAP plots shap_enabled = True # COMMAND", "%md # MAGIC ### Loading model to make prediction #", "### Select supported columns # MAGIC Select only the columns", "SHAP feature importance is a very memory intensive operation, so", "explain. Increase the sample size and rerun for more thorough", "logging of input samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True)", "model # MAGIC - Log relevant metrics to MLflow to", "can predict on a dataset that has extra columns that", "imputed with the mode (most frequent values). 
mode = X_train.mode().iloc[0]", "mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ``` # COMMAND ----------", "# Split out train data X_train, split_X_rem, y_train, split_y_rem =", "# MAGIC %md # MAGIC # XGBoost training # MAGIC", "model on an unseen dataset) # COMMAND ---------- df_loaded.columns #", "MLflow to track experiments mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" # COMMAND", "---------- transformers = [] # COMMAND ---------- # MAGIC %md", "used in training. # MAGIC `[]` are dropped in the", "local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- # MAGIC %md #", "train data X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6,", "\"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"] col_selector = ColumnSelector(supported_cols) # COMMAND", "TF-IDF vectorization. The length of the output # MAGIC vector", "columns are dropped. # COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector", "uuid import yaml None import xgboost from mlflow.tracking import MlflowClient", "Clone this notebook into your project folder by selecting **File", "MAGIC %md # MAGIC ### Select supported columns # MAGIC", "training # MAGIC This is an auto-generated notebook. To reproduce", "it later for inference. 
# MAGIC # MAGIC > **NOTE:**", "one of the top word n-grams # MAGIC where n", "from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download", "\"type\", \"author\"] col_selector = ColumnSelector(supported_cols) # COMMAND ---------- # MAGIC", "df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] # Split out train data", "%md # MAGIC ### Select supported columns # MAGIC Select", "sklearn.feature_extraction import FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline import", "plots shap_enabled = True # COMMAND ---------- if shap_enabled: from", "= train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining data", "import FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline", "k, v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"): v for", "(60% of the dataset used to train the model) #", "# MAGIC of the relationship between features and model output.", "notebook toolbar. # MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ #", "\"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"] col_selector =", "hyperparameters of the model) # MAGIC - Test (20% of", "under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the model", "more thorough results. 
example = X_val.sample(n=1).fillna(mode) # Use Kernel SHAP", "COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC %md #", "into a pandas DataFrame input_client = MlflowClient() input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\",", "'text': {0: 'aliens are coming to invade earth'}, 'language': {0:", "(\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\", xgbc_classifier), ]) #", "are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change", "Register to Model Registry # MAGIC ``` # MAGIC model_name", "SHAP cannot explain models using data with nulls. # To", "the shape of the input to 2D (\"reshape\", FunctionTransformer(np.reshape, kw_args={\"newshape\":-1})),", "Generating SHAP feature importance is a very memory intensive operation,", "separate pipeline to transform the validation dataset. This is used", "# MAGIC %md # MAGIC ### Feature standardization # MAGIC", "COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X) #", "as the imputed samples may not match the actual data", "# MAGIC - Compare trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) #", "into your project folder by selecting **File > Clone** in", "metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"): v for k, v in", "mlflow.sklearn.autolog(disable=True) X_val_processed = pipeline.fit_transform(X_val, y_val) model # COMMAND ---------- #", "with approval and governance workflows, and monitor ML deployments and", "column. # COMMAND ---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing", "X_train.mode().iloc[0] # Sample background data for SHAP Explainer. Increase the", "pipelines. 
See the Alerts tab of the AutoML Experiment page", "defined below to `shap_enabled = True` and re-run this notebook", "the true performance of the model on an unseen dataset)", "plot # MAGIC of the relationship between features and model", "# MAGIC ### Text features # MAGIC Convert each feature", "xgbc_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], str(uuid.uuid4())[:8]) os.makedirs(xgbc_temp_dir) xgbc_client = MlflowClient() xgbc_model_env_path =", "Kernel SHAP to explain feature importance on the example from", "# MAGIC ``` # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\"", "and governance workflows, and monitor ML deployments and their performance.", "shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND ----------", "binary columns through one-hot encoding. # MAGIC For each input", "a separate pipeline to transform the validation dataset. This is", "= OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\",", "the validation set. predict = lambda x: model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer", "Test (20% of the dataset used to report the true", "\"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\", \"author\"]", "size to reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode) # Sample", "vectorizer, [col])) # COMMAND ---------- from sklearn.compose import ColumnTransformer preprocessor", "Pipeline from sklearn.preprocessing import FunctionTransformer for col in {'type', 'author'}:", "xgbc_classifier), ]) # Create a separate pipeline to transform the", "feature and the target variable. 
# MAGIC - Generating SHAP", "numpy as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import", "{0: 'aliens are coming to invade earth'}, 'language': {0: 'english'},", "FeatureHasher(n_features=1024, input_type=\"string\"))]) transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND ---------- # MAGIC", "# MAGIC ### Feature standardization # MAGIC Scale all feature", "the **10-3-ML-Cluster** cluster and rerun it. # MAGIC - Compare", "for the model already trained in this notebook can be", "{0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0:", "### Categorical columns # COMMAND ---------- # MAGIC %md #", "in training. # MAGIC `[]` are dropped in the pipelines.", "environment YAML for model serving import os import shutil import", "or provide your own examples to explain. # MAGIC -", "df_loaded[target_col] # Split out train data X_train, split_X_rem, y_train, split_y_rem", "in the range [1, 2]. # COMMAND ---------- import numpy", "# MAGIC - Train (60% of the dataset used to", "cell to log a different trial to the MLflow experiment", "to make prediction # COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model", "dataset. This is used for early stopping. pipeline = Pipeline([", "the sample size and rerun for more thorough results. 
example", "it into a pandas DataFrame input_client = MlflowClient() input_data_path =", "COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\",", "for feature in [\"text\", \"main_img_url\"]: hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None,", "= xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str,", "and test X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5,", "mlflow.set_experiment(\"/Users/<EMAIL>/databricks_automl/label_news_articles_csv-2022_03_12-15_38\") target_col = \"label\" # COMMAND ---------- # MAGIC %md", "earth'}, 'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'},", "the mode (most frequent values). mode = X_train.mode().iloc[0] # Sample", "# MAGIC - Test (20% of the dataset used to", "train_size=0.6, random_state=799811440, stratify=split_y) # Split remaining data equally for validation", "COMMAND ---------- # MAGIC %md # MAGIC ## Train -", "input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact", "read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). # COMMAND ----------", "Log metrics for the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val,", "the input column. 
# COMMAND ---------- from sklearn.pipeline import Pipeline", "# MAGIC examples to explain will be imputed using the", "if shap_enabled: from shap import KernelExplainer, summary_plot # SHAP cannot", "transformers.append((f\"{feature}_hasher\", hash_transformer, [feature])) # COMMAND ---------- # MAGIC %md #", "equal to 1024. Each column corresponds to one of the", "Text features # MAGIC Convert each feature to a fixed-length", "train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col] # Split", "numeric column is imputed with zeros. # COMMAND ---------- from", "v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"])) # COMMAND ----------", "more thorough results, increase the sample size of explanations, or", "= mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\") # Log metrics for the", "MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ```", "very memory intensive operation, so to ensure that AutoML can", "Split the input data into 3 sets: # MAGIC -", "learning models, providing a summary plot # MAGIC of the", "in the input column. # COMMAND ---------- from sklearn.pipeline import", "# COMMAND ---------- model_uri = f\"runs:/51c0348482e042ea8e4b7983ab6bff99/model\" model = mlflow.pyfunc.load_model(model_uri) #model.predict(input_X)", "model.predict_proba(pd.DataFrame(x, columns=X_train.columns)) explainer = KernelExplainer(predict, train_sample, link=\"logit\") shap_values = explainer.shap_values(example,", "SHAP to explain feature importance on the example from the", "the artifact and read it into a pandas DataFrame input_client", "xgbc_client = MlflowClient() xgbc_model_env_path = xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str =", "centered around zero with unit variance. 
# COMMAND ---------- from", "approach to explain machine learning models, providing a summary plot", "MAGIC For more thorough results, increase the sample size of", "a very memory intensive operation, so to ensure that AutoML", "COMMAND ---------- # MAGIC %md # MAGIC #### Low-cardinality categoricals", "Load Data # COMMAND ---------- from mlflow.tracking import MlflowClient import", "MAGIC Each numeric column is imputed with zeros. # COMMAND", "to the model registry and to retrieve it later for", "their performance. The snippets below show how to add the", "import uuid import shutil import pandas as pd # Create", "with the mode (most frequent values). mode = X_train.mode().iloc[0] #", "\"tmp\", str(uuid.uuid4())[:8]) os.makedirs(input_temp_dir) # Download the artifact and read it", "with zeros. # COMMAND ---------- from sklearn.feature_extraction import FeatureHasher from", "# COMMAND ---------- # MAGIC %md # MAGIC #### Medium-cardinality", "``` # MAGIC # MAGIC ### Load from Model Registry", "ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0) # COMMAND ---------- #", ") model = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer),", "experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Change the model parameters and re-run", "trained in this notebook can be found in the cell", "Log metrics for the test set xgbc_test_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_test,", "'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type': {0: 'bs'}, 'title_without_stopwords':", "y_train, split_y_rem = train_test_split(split_X, split_y, train_size=0.6, random_state=799811440, stratify=split_y) # Split", "Pipeline for feature in [\"text\", \"main_img_url\"]: 
hash_transformer = Pipeline(steps=[ (\"imputer\",", "can be found in the cell below # MAGIC #", "used to report the true performance of the model on", "supported. This allows us to train a model that can", "dataset used to train the model) # MAGIC - Validation", "feature importance is a very memory intensive operation, so to", "MAGIC ## Preprocessors # COMMAND ---------- transformers = [] #", "affects the computed # MAGIC SHAP values, as the imputed", "data df_loaded.head(5) # COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ---------- #", "the mode (most frequent values). This affects the computed #", "from experimentation to online testing and production, integrate with approval", "a collaborative hub where teams can share ML models, work", "'language': {0: 'english'}, 'site_url': {0: 'cnn.com'}, 'main_img_url': {0: 'https://2.bp.blogspot.com/-0mdp0nZiwMI/UYwYvexmW2I/AAAAAAAAVQM/7C_X5WRE_mQ/w1200-h630-p-nu/Edison-Stock-Ticker.jpg'}, 'type':", "artifact and read it into a pandas DataFrame input_client =", "supported columns # MAGIC Select only the columns that are", "model = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer), (\"classifier\",", "{'type', 'author'}: vectorizer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), #", "columns is equal to the number of unique values in", "dataset used to report the true performance of the model", "y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND", "1024 float columns. 
# MAGIC Each numeric column is imputed", "= \"Example\" # MAGIC model_version = registered_model_version.version # MAGIC #", "thorough results, increase the sample size of explanations, or provide", "Download the artifact and read it into a pandas DataFrame", "importance, and impact/color describe the correlation between the feature and", "directory to download input data from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"],", "- Generating SHAP feature importance is a very memory intensive", "sklearn.pipeline import Pipeline set_config(display=\"diagram\") xgbc_classifier = XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7,", "shutil import pandas as pd # Create temp directory to", "running out of memory, we disable SHAP by default.<br />", "to add the model trained in this notebook to the", "of unique values in the input column. # COMMAND ----------", "input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the", "thorough results. example = X_val.sample(n=1).fillna(mode) # Use Kernel SHAP to", "%md # MAGIC ## Train - Validation - Test Split", "Load model without registering # MAGIC ``` # MAGIC model_uri", "Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path,", "validation set to explain. 
Increase the sample size and rerun", "{0: 'aliens are coming to invade earth'}, 'text': {0: 'aliens", "the runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC", "True # COMMAND ---------- if shap_enabled: from shap import KernelExplainer,", "notebook. To reproduce these results, attach this notebook to the", "``` # COMMAND ---------- # model_uri for the generated model", "# MAGIC - To reduce the computational overhead of each", "relevant metrics to MLflow to track runs # MAGIC -", "model_uri for the generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\") # COMMAND", "imputed using the mode (most frequent values). This affects the", "see the SHAP plots. # MAGIC - To reduce the", "[The MLflow Model Registry](https://docs.databricks.com/applications/mlflow/model-registry.html) is a collaborative hub where teams", "# MAGIC - Generating SHAP feature importance is a very", "TfidfVectorizer from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline from", "for the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val, prefix=\"val_\")", "explain models using data with nulls; if your dataset has", "### Register to Model Registry # MAGIC ``` # MAGIC", "column into multiple binary columns through one-hot encoding. # MAGIC", "the computational overhead of each trial, a single example is", "metrics to MLflow to track runs # MAGIC - All", "on the example from the validation set. 
predict = lambda", "# MAGIC ``` # COMMAND ---------- # model_uri for the", "of # MAGIC importance, and impact/color describe the correlation between", "output of the cell below # COMMAND ---------- from xgboost", "set to explain.<br /> # MAGIC For more thorough results,", "- Test (20% of the dataset used to report the", "trials in the [MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC - Navigate to", "mode = X_train.mode().iloc[0] # Sample background data for SHAP Explainer.", "registered_model_version.version # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC", "link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example, class_names=model.classes_) # COMMAND", "- Change the model parameters and re-run the training cell", "useful.) # MAGIC - Clone this notebook into your project", "OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\", \"title\", \"title_without_stopwords\", \"text_without_stopwords\"]))", "MAGIC %md # MAGIC #### Low-cardinality categoricals # MAGIC Convert", "columns through one-hot encoding. # MAGIC For each input categorical", "one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\", \"site_url\", \"hasImage\", \"title\",", "as np from sklearn.feature_extraction.text import TfidfVectorizer from sklearn.impute import SimpleImputer", "output # MAGIC vector is equal to 1024. Each column", "these columns are dropped. # COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import", "2]. 
# COMMAND ---------- import numpy as np from sklearn.feature_extraction.text", "# MAGIC - To view the full list of tunable", "model) # MAGIC - Test (20% of the dataset used", "and re-run the notebook to see the SHAP plots shap_enabled", "= mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) # MAGIC ``` # COMMAND", "# COMMAND ---------- from databricks.automl_runtime.sklearn.column_selector import ColumnSelector supported_cols = [\"text_without_stopwords\",", "= KernelExplainer(predict, train_sample, link=\"logit\") shap_values = explainer.shap_values(example, l1_reg=False) summary_plot(shap_values, example,", "xgbc_client.download_artifacts(mlflow_run.info.run_id, \"model/conda.yaml\", xgbc_temp_dir) xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader)", "are coming to invade earth'}, 'language': {0: 'english'}, 'site_url': {0:", "# MAGIC - Log relevant metrics to MLflow to track", "is hashed to 1024 float columns. # MAGIC Each numeric", "= True # COMMAND ---------- if shap_enabled: from shap import", "are imputed with the mode (most frequent values). mode =", "notebook to the **10-3-ML-Cluster** cluster and rerun it. # MAGIC", "where n is in the range [1, 2]. # COMMAND", "# COMMAND ---------- from sklearn.preprocessing import StandardScaler standardizer = StandardScaler()", "run trials without # MAGIC running out of memory, we", "# MAGIC ### Load from Model Registry # MAGIC ```", "features # MAGIC Convert each feature to a fixed-length vector", "---------- # MAGIC %md # MAGIC ### Loading model to", "classifier__eval_set=[(X_val_processed,y_val)], classifier__verbose=False) # Training metrics are logged by MLflow autologging", "zero with unit variance. 
# COMMAND ---------- from sklearn.preprocessing import", "v for k, v in xgbc_test_metrics.items()} display(pd.DataFrame([xgbc_val_metrics, xgbc_test_metrics], index=[\"validation\", \"test\"]))", "MAGIC ### Categorical columns # COMMAND ---------- # MAGIC %md", "re-run this notebook to see the SHAP plots. # MAGIC", "are coming to invade earth'}, 'text_without_stopwords': {0: 'aliens are coming", "for model serving import os import shutil import uuid import", "actual data distribution. # MAGIC # MAGIC For more information", "%md # MAGIC ### Categorical columns # COMMAND ---------- #", "MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) # MAGIC model.predict(input_X) #", "is equal to 1024. Each column corresponds to one of", "that are not used in training. # MAGIC `[]` are", "Alerts tab of the AutoML Experiment page for details on", "shutil import uuid import yaml None import xgboost from mlflow.tracking", "sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y = df_loaded[target_col]", "# MAGIC This is an auto-generated notebook. To reproduce these", "[\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\", \"type\",", "%md # MAGIC #### Medium-cardinality categoricals # MAGIC Convert each", "---------- import mlflow import databricks.automl_runtime # Use MLflow to track", "and rerun for more thorough results. example = X_val.sample(n=1).fillna(mode) #", "frequent values). mode = X_train.mode().iloc[0] # Sample background data for", "numerical representation. 
# MAGIC Each string column is hashed to", "to download input data from MLflow input_temp_dir = os.path.join(os.environ[\"SPARK_LOCAL_DIRS\"], \"tmp\",", "MlflowClient import os import uuid import shutil import pandas as", "# MAGIC ``` # MAGIC # MAGIC ### Load from", "{0: 'aliens are coming to invade earth'}, 'text_without_stopwords': {0: 'aliens", "explain feature importance on the example from the validation set.", "of the AutoML Experiment page for details on why these", "sklearn.pipeline import Pipeline from sklearn.preprocessing import FunctionTransformer for col in", "'<EMAIL>jim.<EMAIL>'}, 'published': {0: '2016-10-27T18:05:26.351+03:00'}, 'title': {0: 'aliens are coming to", "with unit variance. # COMMAND ---------- from sklearn.preprocessing import StandardScaler", "true performance of the model on an unseen dataset) #", "# MAGIC ### Register to Model Registry # MAGIC ```", "of the dataset used to train the model) # MAGIC", "---------- # MAGIC %md # MAGIC ## Train - Validation", "the Experiments UI, this link isn't very useful.) # MAGIC", "**NOTE:** The `model_uri` for the model already trained in this", "Display the logged metrics xgbc_val_metrics = {k.replace(\"val_\", \"\"): v for", "models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\") as mlflow_run: model.fit(X_train, y_train, classifier__early_stopping_rounds=5,", "autologging # Log metrics for the validation set xgbc_val_metrics =", "an unseen dataset) # COMMAND ---------- df_loaded.columns # COMMAND ----------", "imputed with zeros. # COMMAND ---------- from sklearn.feature_extraction import FeatureHasher", "sample size and rerun for more thorough results. 
example =", "mlflow_run.info.run_id }/model\" # MAGIC registered_model_version = mlflow.register_model(model_uri, model_name) # MAGIC", "the cell below # COMMAND ---------- from xgboost import XGBClassifier", "AutoML Experiment page for details on why these columns are", "import shutil import pandas as pd # Create temp directory", "number of unique values in the input column. # COMMAND", "# MAGIC # MAGIC ### Register to Model Registry #", "sklearn.pipeline import Pipeline for feature in [\"text\", \"main_img_url\"]: hash_transformer =", "# Create a separate pipeline to transform the validation dataset.", "Set this flag to True and re-run the notebook to", "us to train a model that can predict on a", "how to add the model trained in this notebook to", "Model Registry # MAGIC ``` # MAGIC model_name = \"Example\"", "The `model_uri` for the model already trained in this notebook", "MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) #", "input samples, metrics, parameters, and models mlflow.sklearn.autolog(log_input_examples=True, silent=True) with mlflow.start_run(run_name=\"xgboost\")", "pipeline.fit_transform(X_val, y_val) model # COMMAND ---------- # Enable automatic logging", "Validation (20% of the dataset used to tune the hyperparameters", "kw_args={\"newshape\":-1})), (\"tfidf\", TfidfVectorizer(decode_error=\"ignore\", ngram_range = (1, 2), max_features=1024))]) transformers.append((f\"text_{col}\", vectorizer,", "# MAGIC Split the input data into 3 sets: #", "input_data_path = input_client.download_artifacts(\"c2dfe80b419d4a8dbc88a90e3274369a\", \"data\", input_temp_dir) df_loaded = pd.read_parquet(os.path.join(input_data_path, \"training_data\")) #", "classification model # MAGIC - Log relevant metrics to MLflow", "MAGIC #### Low-cardinality categoricals # MAGIC Convert each low-cardinality categorical", "categoricals # MAGIC Convert each low-cardinality categorical column into 
multiple", "from xgboost import XGBClassifier help(XGBClassifier) # COMMAND ---------- import mlflow", "# COMMAND ---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing import", "# MAGIC SHAP is a game-theoretic approach to explain machine", "xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- # MAGIC %md", "_10.3.x-cpu-ml-scala2.12_ # COMMAND ---------- import mlflow import databricks.automl_runtime # Use", "data into 3 sets: # MAGIC - Train (60% of", "XGBClassifier( colsample_bytree=0.7324555878929649, learning_rate=0.007636627530856404, max_depth=7, min_child_weight=6, n_estimators=106, n_jobs=100, subsample=0.6972187716458148, verbosity=0, random_state=799811440,", "---------- from sklearn.preprocessing import StandardScaler standardizer = StandardScaler() # COMMAND", "xgbc_val_metrics = {k.replace(\"val_\", \"\"): v for k, v in xgbc_val_metrics.items()}", "v in xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"): v for k,", "\"label\" # COMMAND ---------- # MAGIC %md # MAGIC ##", "%md # MAGIC ## Feature importance # MAGIC # MAGIC", "COMMAND ---------- from sklearn.pipeline import Pipeline from sklearn.preprocessing import OneHotEncoder", "# model_uri for the generated model print(f\"runs:/{ mlflow_run.info.run_id }/model\") #", "runs are logged under [this MLflow experiment](#mlflow/experiments/406583024052808/s?orderByKey=metrics.%60val_f1_score%60&orderByAsc=false) # MAGIC -", "these results, attach this notebook to the **10-3-ML-Cluster** cluster and", "to 1024. 
Each column corresponds to one of the top", "logged by MLflow autologging # Log metrics for the validation", "project folder by selecting **File > Clone** in the notebook", "y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ----------", "### Text features # MAGIC Convert each feature to a", "# MAGIC # MAGIC Runtime Version: _10.3.x-cpu-ml-scala2.12_ # COMMAND ----------", "MAGIC importance, and impact/color describe the correlation between the feature", "number of output columns is equal to the number of", "and rerun it. # MAGIC - Compare trials in the", "# MAGIC %md # MAGIC ## Feature importance # MAGIC", "Pipeline from sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder,", "the computed # MAGIC SHAP values, as the imputed samples", "is in the range [1, 2]. # COMMAND ---------- import", "stopping. pipeline = Pipeline([ (\"column_selector\", col_selector), (\"preprocessor\", preprocessor), (\"standardizer\", standardizer),", "# COMMAND ---------- # MAGIC %md # MAGIC ## Preprocessors", "COMMAND ---------- from sklearn.preprocessing import StandardScaler standardizer = StandardScaler() #", "---------- # MAGIC %md # MAGIC ## Feature importance #", "data with nulls; if your dataset has any, both the", "COMMAND ---------- # MAGIC %md # MAGIC ## Inference #", "out train data X_train, split_X_rem, y_train, split_y_rem = train_test_split(split_X, split_y,", "class_names=model.classes_) # COMMAND ---------- # MAGIC %md # MAGIC ##", "as pd # Create temp directory to download input data", "{0: 'bs'}, 'title_without_stopwords': {0: 'aliens are coming to invade earth'},", "= True` and re-run this notebook to see the SHAP", "test X_val, X_test, y_val, y_test = train_test_split(split_X_rem, split_y_rem, test_size=0.5, random_state=799811440,", "notebook can be found in the cell below # MAGIC", 
"sklearn.preprocessing import OneHotEncoder one_hot_encoder = OneHotEncoder(handle_unknown=\"ignore\") transformers.append((\"onehot\", one_hot_encoder, [\"published\", \"language\",", "categoricals # MAGIC Convert each medium-cardinality categorical column into a", "= pd.read_parquet(os.path.join(input_data_path, \"training_data\")) # Delete the temp data shutil.rmtree(input_temp_dir) #", "examples to explain. # MAGIC - SHAP cannot explain models", "has any, both the background data and # MAGIC examples", "MLflow experiment # MAGIC - To view the full list", "Categorical columns # COMMAND ---------- # MAGIC %md # MAGIC", "dataset) # COMMAND ---------- df_loaded.columns # COMMAND ---------- from sklearn.model_selection", "are not used in training. # MAGIC `[]` are dropped", "MAGIC Convert each low-cardinality categorical column into multiple binary columns", "FeatureHasher from sklearn.impute import SimpleImputer from sklearn.pipeline import Pipeline for", "metrics for the validation set xgbc_val_metrics = mlflow.sklearn.eval_and_log_metrics(model, X_val, y_val,", "temp data shutil.rmtree(input_temp_dir) # Preview data df_loaded.head(5) # COMMAND ----------", "Load from Model Registry # MAGIC ``` # MAGIC model_name", "mlflow_run.info.run_id }/model\" # MAGIC # MAGIC model = mlflow.pyfunc.load_model(model_uri) #", "import mlflow import databricks.automl_runtime # Use MLflow to track experiments", "xgbc_model_env_str = open(xgbc_model_env_path) xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path,", "# COMMAND ---------- df_loaded.head(1).to_dict() # COMMAND ---------- # MAGIC %md", "Data # COMMAND ---------- from mlflow.tracking import MlflowClient import os", "the correlation between the feature and the target variable. 
#", "mlflow.pyfunc.load_model(model_uri=f\"models:/{model_name}/{model_version}\") # MAGIC model.predict(input_X) # MAGIC ``` # MAGIC #", "the SHAP plots shap_enabled = True # COMMAND ---------- if", "different trial to the MLflow experiment # MAGIC - To", "f: f.write(yaml.dump(xgbc_parsed_model_env_str)) xgbc_client.log_artifact(run_id=mlflow_run.info.run_id, local_path=xgbc_model_env_path, artifact_path=\"model\") shutil.rmtree(xgbc_temp_dir) # COMMAND ---------- #", "xgboost import XGBClassifier help(XGBClassifier) # COMMAND ---------- import mlflow import", "Each numeric column is imputed with zeros. # COMMAND ----------", "to explain.<br /> # MAGIC For more thorough results, increase", "split_y_rem, test_size=0.5, random_state=799811440, stratify=split_y_rem) # COMMAND ---------- # MAGIC %md", "of each trial, a single example is sampled from the", "---------- from sklearn.model_selection import train_test_split split_X = df_loaded.drop([target_col], axis=1) split_y", "xgbc_parsed_model_env_str = yaml.load(xgbc_model_env_str, Loader=yaml.FullLoader) xgbc_parsed_model_env_str[\"dependencies\"][-1][\"pip\"].append(f\"xgboost=={xgboost.__version__}\") with open(xgbc_model_env_path, \"w\") as f:", "MAGIC For more information on how to read Shapley values,", "# MAGIC vector is equal to 1024. Each column corresponds", "= [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\", \"text\", \"title\",", "equal to the number of unique values in the input", "SHAP cannot explain models using data with nulls; if your", "equally for validation and test X_val, X_test, y_val, y_test =", "\"main_img_url\"]: hash_transformer = Pipeline(steps=[ (\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), (f\"{feature}_hasher\", FeatureHasher(n_features=1024,", "validation dataset. This is used for early stopping. 
pipeline =", "(\"imputer\", SimpleImputer(missing_values=None, strategy=\"constant\", fill_value=\"\")), # Reshape to 1D since SimpleImputer", "notebook to the model registry and to retrieve it later", "governance workflows, and monitor ML deployments and their performance. The", "to be centered around zero with unit variance. # COMMAND", "hub where teams can share ML models, work together from", "how to read Shapley values, see the [SHAP documentation](https://shap.readthedocs.io/en/latest/example_notebooks/overviews/An%20introduction%20to%20explainable%20AI%20with%20Shapley%20values.html). #", "MAGIC model.predict(input_X) # MAGIC ``` # MAGIC # MAGIC ###", "Scale all feature columns to be centered around zero with", "- Validation (20% of the dataset used to tune the", "Select only the columns that are supported. This allows us", "xgbc_val_metrics.items()} xgbc_test_metrics = {k.replace(\"test_\", \"\"): v for k, v in", "the sample size to reduce variance. train_sample = X_train.sample(n=min(100, len(X_train.index))).fillna(mode)", "---------- if shap_enabled: from shap import KernelExplainer, summary_plot # SHAP", "ColumnSelector supported_cols = [\"text_without_stopwords\", \"published\", \"language\", \"main_img_url\", \"site_url\", \"hasImage\", \"title_without_stopwords\",", "background data for SHAP Explainer. 
Increase the sample size to", "MAGIC model_name = \"Example\" # MAGIC # MAGIC model_uri =", "Medium-cardinality categoricals # MAGIC Convert each medium-cardinality categorical column into", "invade earth'}, 'text': {0: 'aliens are coming to invade earth'},", "cannot explain models using data with nulls; if your dataset", "= pd.DataFrame(data=data) df.head() # COMMAND ---------- model.predict(df) # COMMAND ----------", "tunable hyperparameters, check the output of the cell below #", "default.<br /> # MAGIC You can set the flag defined", "fill_value=\"\")), # Reshape to 1D since SimpleImputer changes the shape", "import pandas as pd data = {'author': {0: '<EMAIL>jim.<EMAIL>'}, 'published':", "explain. # MAGIC - SHAP cannot explain models using data", "the cell below # MAGIC # MAGIC ### Register to", "summary_plot # SHAP cannot explain models using data with nulls.", "from shap import KernelExplainer, summary_plot # SHAP cannot explain models", "imputed samples may not match the actual data distribution. #", "MAGIC ``` # MAGIC model_name = \"Example\" # MAGIC model_version", "dataset that has extra columns that are not used in", "To enable SHAP to succeed, both the background data and", "For more thorough results, increase the sample size of explanations,", "### Load model without registering # MAGIC ``` # MAGIC", "notebook to see the SHAP plots shap_enabled = True #", "MAGIC - Test (20% of the dataset used to report", "## Train - Validation - Test Split # MAGIC Split", "Preprocessors # COMMAND ---------- transformers = [] # COMMAND ----------", "= \"label\" # COMMAND ---------- # MAGIC %md # MAGIC", "MAGIC ``` # MAGIC model_uri = f\"runs:/{ mlflow_run.info.run_id }/model\" #", "memory, we disable SHAP by default.<br /> # MAGIC You", "# MAGIC model.predict(input_X) # MAGIC ``` # COMMAND ---------- #", "ML deployments and their performance. 
The snippets below show how", "MAGIC Each string column is hashed to 1024 float columns.", "performance of the model on an unseen dataset) # COMMAND", "---------- from sklearn.compose import ColumnTransformer preprocessor = ColumnTransformer(transformers, remainder=\"passthrough\", sparse_threshold=0)", "used to train the model) # MAGIC - Validation (20%", "with nulls. # To enable SHAP to succeed, both the", "mlflow_run.info.run_id }/model\") # COMMAND ---------- # MAGIC %md # MAGIC", "on why these columns are dropped. # COMMAND ---------- from", "flag defined below to `shap_enabled = True` and re-run this" ]
[ "importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k) for k in module.__all__}) __all__", "for k in module.__all__}) __all__ += module.__all__ except AttributeError: continue", "__all__ = [] for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): module", "pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k) for k", "in pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k) for", "is_pkg in pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k)", "= [] for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): module =", "try: globals().update({k: getattr(module, k) for k in module.__all__}) __all__ +=", "module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k) for k in", "k) for k in module.__all__}) __all__ += module.__all__ except AttributeError:", "module_name, is_pkg in pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module,", "for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try:", "loader, module_name, is_pkg in pkgutil.walk_packages(__path__): module = importlib.import_module('.'+module_name,package=__name__) try: globals().update({k:", "import importlib import pkgutil __all__ = [] for loader, module_name,", "globals().update({k: getattr(module, k) for k in module.__all__}) __all__ += module.__all__", "importlib import pkgutil __all__ = [] for loader, module_name, is_pkg", "[] for loader, module_name, is_pkg in pkgutil.walk_packages(__path__): module = 
importlib.import_module('.'+module_name,package=__name__)", "pkgutil __all__ = [] for loader, module_name, is_pkg in pkgutil.walk_packages(__path__):", "= importlib.import_module('.'+module_name,package=__name__) try: globals().update({k: getattr(module, k) for k in module.__all__})", "getattr(module, k) for k in module.__all__}) __all__ += module.__all__ except", "import pkgutil __all__ = [] for loader, module_name, is_pkg in" ]
[ "set(center_pts) if order > 1: for x, y in list(diff_array):", "= ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if should_create_a_block: grid =", "in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] = tile", "as aso def spawn_block(x, y): if np.random.rand() > 0.5: d1", "y]) + grid[x, y].v if (grid[next_x, next_y] != False): if", "= np.meshgrid(np.arange(2 * order) - (2 * order - 1)/2", "(x,y+1), (x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool)", "= [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))),", "Y_aztec = aztec_grid(order, True) for (x,y) in zip(X_aztec, Y_aztec): grid[x,", "grid[x, y].v if (grid[next_x, next_y] != False): if all(grid[next_x, next_y].v", "order - 1) else: idx = np.abs(X) + np.abs(Y) <=", "1], center_pts[:, 0]))] X = center_pts[:, 0] Y = center_pts[:,", "center_pts[:, 1] for (x,y) in zip(X,Y): try: if ~grid[x, y]:", "y] = False return grid def move_tiles(grid, curr_order): temp_grid =", "d2] def aztec_grid(order, only_new_blocks = True): grid_X, grid_Y = np.meshgrid(np.arange(2", "center_pts[:,0] Y = center_pts[:,1] if only_new_blocks: idx = (np.abs(X) +", "1])) else: d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]),", "y ] = False grid[next_x, next_y] = False except: pass", "30 22:04:48 2020 @author: baptistelafoux \"\"\" import domino import numpy", "= aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)]", "zip(X_aztec, Y_aztec): grid[x, y] = False return grid def move_tiles(grid,", "if should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid) except: pass except:", "# -*- coding: utf-8 -*- \"\"\" Created on Wed Dec", "grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] = tile return grid", "= domino.domino(np.array([x + 1, y]), np.array([x + 1, y +", "= np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], 
center_pts[:, 0]))] X =", "np.array([0, 1])) else: d1 = domino.domino(np.array([x, y]), np.array([x, y +", "False else: for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] =", "should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid) except: pass except: pass", "grid[x2, y2] = False for coord in temp_grid: grid[coord] =", "X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y) in", "+ np.abs(Y) > order - 1) else: idx = np.abs(X)", "#!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created on", "True) for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False", "only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X)", "1]), np.array([0, 1])) else: d1 = domino.domino(np.array([x, y]), np.array([x, y", "+ np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order", "= temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X", "center_pts[:,0]))] X = center_pts[:,0] Y = center_pts[:,1] if only_new_blocks: idx", "Y = center_pts[:, 1] for (x,y) in zip(X,Y): try: next_x,", "return [d1, d2] def aztec_grid(order, only_new_blocks = True): grid_X, grid_Y", "numpy.lib.arraysetops as aso def spawn_block(x, y): if np.random.rand() > 0.5:", "center_pts[:, 0]))] X = center_pts[:, 0] Y = center_pts[:, 1]", "in zip(X,Y): try: next_x, next_y = np.array([x, y]) + grid[x,", "diff_array = set(center_pts_aztec) - set(center_pts) if order > 1: for", "Wed Dec 30 22:04:48 2020 @author: baptistelafoux \"\"\" import domino", "if order > 1: for x, y in list(diff_array): grid[x,", "y in list(diff_array): grid[x, y] = False else: for (x,y)", "try: if ~grid[x, y]: idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)]", "False): if all(grid[next_x, next_y].v == - grid[x, y].v): grid[x, y", "np.array([-1,0])) d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1,", "y]), np.array([x, y + 1]), np.array([-1,0])) d2 = domino.domino(np.array([x +", "\"\"\" import domino import numpy as np import numpy.lib.arraysetops as", "try: next_x, 
next_y = np.array([x, y]) + grid[x, y].v if", "np.abs(Y) <= order return X[idx], Y[idx] def add_to_grid(tiles, grid): for", "= np.array([x, y]) + grid[x, y].v if (grid[next_x, next_y] !=", "center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X =", "(2 * order - 1)/2 , np.arange(2 * order) -", "True): grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 *", "aztec_grid(order, True) for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] =", "def generate_good_block(grid): center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:,", "temp_grid = {} for coord in grid: if grid[coord] !=", "2020 @author: baptistelafoux \"\"\" import domino import numpy as np", "1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X", "def enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order, True) for (x,y)", "= domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0])) d2 =", "in temp_grid: grid[coord] = temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts", "def move_tiles(grid, curr_order): temp_grid = {} for coord in grid:", "else: d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0]))", "center_pts[:, 1] for (x,y) in zip(X,Y): try: next_x, next_y =", "[(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype", "temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False grid[x2, y2]", "X_aztec, Y_aztec = aztec_grid(order, True) for (x,y) in zip(X_aztec, Y_aztec):", "center_pts = np.array([*grid]) X = center_pts[:, 0] Y = center_pts[:,", "y].v if (grid[next_x, next_y] != False): if all(grid[next_x, next_y].v ==", "grid: if grid[coord] != False: x1, y1 = grid[coord].pt1 x2,", "* order - 1)/2 , np.arange(2 * order) - (2", "next_y = np.array([x, y]) + grid[x, 
y].v if (grid[next_x, next_y]", "= add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False grid[x2, y2] =", "for x, y in list(diff_array): grid[x, y] = False else:", "= center_pts[:,1] if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <=", "y].v): grid[x, y ] = False grid[next_x, next_y] = False", "False grid[x2, y2] = False for coord in temp_grid: grid[coord]", "1]), np.array([x + 1, y + 1]), np.array([0, 1])) else:", "tile return grid def generate_good_block(grid): center_pts = np.array([*grid]) center_pts =", "in list(diff_array): grid[x, y] = False else: for (x,y) in", "zip(X_aztec, Y_aztec): grid[x, y] = False return grid def enlarge_grid(grid,", "pass return grid def enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec,", "def enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec, Y_aztec = aztec_grid(order)", "> 1: for x, y in list(diff_array): grid[x, y] =", "> 0.5: d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]),", "= center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y = center_pts[:,1] if", "- 1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))]", "center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X = center_pts[:, 0]", "np.array([x + 1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y +", "coord in temp_grid: grid[coord] = temp_grid[coord] return grid def destroy_bad_blocks(grid):", "in grid: if grid[coord] != False: x1, y1 = grid[coord].pt1", "np.meshgrid(np.arange(2 * order) - (2 * order - 1)/2 ,", "add_to_grid(tiles, grid): for tile in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile", "y]), np.array([x + 1, y + 1]), np.array([ 1,0])) return", "all(grid[next_x, next_y].v == - grid[x, y].v): grid[x, y ] =", "!= False: x1, y1 = grid[coord].pt1 x2, y2 = grid[coord].pt2", "import domino import numpy as np import numpy.lib.arraysetops as aso", "(x+1,y), (x,y+1), (x+1,y+1)] try: 
should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype =", "= False grid[x2, y2] = False for coord in temp_grid:", "aztec_grid(order, only_new_blocks = True): grid_X, grid_Y = np.meshgrid(np.arange(2 * order)", "(x,y) in zip(X,Y): try: if ~grid[x, y]: idx = [(x,y),", "+ grid[x, y].v if (grid[next_x, next_y] != False): if all(grid[next_x,", "Y[idx] def add_to_grid(tiles, grid): for tile in tiles: grid[tile.pt1[0], tile.pt1[1]]", "grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y =", "y] = False return grid def enlarge_grid(grid, order): X_aztec, Y_aztec", "utf-8 -*- \"\"\" Created on Wed Dec 30 22:04:48 2020", "{} for coord in grid: if grid[coord] != False: x1,", "grid[coord] != False: x1, y1 = grid[coord].pt1 x2, y2 =", "add_to_grid(spawn_block(x, y), grid) except: pass except: pass return grid def", "center_pts = [*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y])", "next_x, next_y = np.array([x, y]) + grid[x, y].v if (grid[next_x,", "np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X = center_pts[:,", "1] for (x,y) in zip(X,Y): try: next_x, next_y = np.array([x,", "= (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y)", "np.array([0,-1])) d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1,", "center_pts[:, 0] Y = center_pts[:, 1] for (x,y) in zip(X,Y):", "idx = np.abs(X) + np.abs(Y) <= order return X[idx], Y[idx]", "= center_pts[:, 1] for (x,y) in zip(X,Y): try: if ~grid[x,", "d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1])) d2", "order) - (2 * order - 1)/2) center_pts = np.array([grid_X.flatten(),", "1, y]), np.array([x + 1, y + 1]), np.array([ 1,0]))", "center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X = center_pts[:, 0] Y =", "d2 = domino.domino(np.array([x + 1, y]), np.array([x + 1, y", "np.abs(Y) > order - 1) else: idx = np.abs(X) +", "(2 * order - 1)/2) 
center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts", "return grid def generate_good_block(grid): center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:,", "temp_grid: grid[coord] = temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts =", "= center_pts[:,0] Y = center_pts[:,1] if only_new_blocks: idx = (np.abs(X)", "* order) - (2 * order - 1)/2 , np.arange(2", "center_pts[:,1] if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order)", "= False grid[next_x, next_y] = False except: pass return grid", "False for coord in temp_grid: grid[coord] = temp_grid[coord] return grid", "0]))] X = center_pts[:, 0] Y = center_pts[:, 1] for", "for tile in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]]", "domino.domino(np.array([x + 1, y]), np.array([x + 1, y + 1]),", "curr_order): temp_grid = {} for coord in grid: if grid[coord]", "for (x,y) in zip(X,Y): try: if ~grid[x, y]: idx =", "(x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if", "= center_pts[:, 0] Y = center_pts[:, 1] for (x,y) in", "as np import numpy.lib.arraysetops as aso def spawn_block(x, y): if", "tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] = tile return grid def", "dtype = bool) if should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid)", "0.5: d1 = domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1]))", "(np.abs(X) + np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) >", "x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1,", "22:04:48 2020 @author: baptistelafoux \"\"\" import domino import numpy as", "= domino.domino(np.array([x, y + 1]), np.array([x + 1, y +", "grid[x, y ] = False grid[next_x, next_y] = False except:", "-*- coding: utf-8 -*- \"\"\" Created on Wed Dec 30", "coding: utf-8 -*- \"\"\" Created on Wed Dec 30 22:04:48", "Dec 30 22:04:48 2020 @author: baptistelafoux \"\"\" import 
domino import", "import numpy.lib.arraysetops as aso def spawn_block(x, y): if np.random.rand() >", "= aztec_grid(order, True) for (x,y) in zip(X_aztec, Y_aztec): grid[x, y]", "def aztec_grid(order, only_new_blocks = True): grid_X, grid_Y = np.meshgrid(np.arange(2 *", "== - grid[x, y].v): grid[x, y ] = False grid[next_x,", "<= order) & (np.abs(X) + np.abs(Y) > order - 1)", "move_tiles(grid, curr_order): temp_grid = {} for coord in grid: if", "y]), np.array([x + 1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y", "Y_aztec): grid[x, y] = False return grid def move_tiles(grid, curr_order):", "d1 = domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0])) d2", "generate_good_block(grid): center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))]", "grid def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X = center_pts[:, 0]", "np.array([x + 1, y + 1]), np.array([0, 1])) else: d1", "y1 = grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid =", "coord in grid: if grid[coord] != False: x1, y1 =", "- grid[x, y].v): grid[x, y ] = False grid[next_x, next_y]", "return grid def move_tiles(grid, curr_order): temp_grid = {} for coord", "else: for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False", "list(diff_array): grid[x, y] = False else: for (x,y) in zip(X_aztec,", "1,0])) return [d1, d2] def aztec_grid(order, only_new_blocks = True): grid_X,", "enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec", "domino.domino(np.array([x, y + 1]), np.array([x + 1, y + 1]),", "= domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1])) d2 =", "= False return grid def enlarge_grid(grid, order): X_aztec, Y_aztec =", "grid[x, y] = False return grid def move_tiles(grid, curr_order): temp_grid", "np.arange(2 * order) - (2 * order - 1)/2) center_pts", "if all(grid[next_x, next_y].v == - grid[x, y].v): grid[x, y ]", "def 
add_to_grid(tiles, grid): for tile in tiles: grid[tile.pt1[0], tile.pt1[1]] =", "1)/2 , np.arange(2 * order) - (2 * order -", "& (np.abs(X) + np.abs(Y) > order - 1) else: idx", "else: idx = np.abs(X) + np.abs(Y) <= order return X[idx],", "np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y", "= [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec)", "= add_to_grid(spawn_block(x, y), grid) except: pass except: pass return grid", "False return grid def enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order,", "-*- \"\"\" Created on Wed Dec 30 22:04:48 2020 @author:", "- set(center_pts) if order > 1: for x, y in", "for (x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False return", "(grid[next_x, next_y] != False): if all(grid[next_x, next_y].v == - grid[x,", "1] for (x,y) in zip(X,Y): try: if ~grid[x, y]: idx", "in zip(X_aztec, Y_aztec): grid[x, y] = False return grid def", "grid def enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec, Y_aztec =", "return grid def enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order, True)", "= center_pts[:, 1] for (x,y) in zip(X,Y): try: next_x, next_y", "idx))), dtype = bool) if should_create_a_block: grid = add_to_grid(spawn_block(x, y),", "import numpy as np import numpy.lib.arraysetops as aso def spawn_block(x,", "aso def spawn_block(x, y): if np.random.rand() > 0.5: d1 =", "(x,y) in zip(X_aztec, Y_aztec): grid[x, y] = False return grid", "Created on Wed Dec 30 22:04:48 2020 @author: baptistelafoux \"\"\"", "grid[x, y] = False return grid def enlarge_grid(grid, order): X_aztec,", "y): if np.random.rand() > 0.5: d1 = domino.domino(np.array([x, y]), np.array([x", "y + 1]), np.array([-1,0])) d2 = domino.domino(np.array([x + 1, y]),", "bool) if should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid) except: pass", "if np.random.rand() > 0.5: d1 = domino.domino(np.array([x, y]), 
np.array([x +", "d2 = domino.domino(np.array([x, y + 1]), np.array([x + 1, y", "!= False): if all(grid[next_x, next_y].v == - grid[x, y].v): grid[x,", "False return grid def move_tiles(grid, curr_order): temp_grid = {} for", "order) - (2 * order - 1)/2 , np.arange(2 *", "= set(center_pts_aztec) - set(center_pts) if order > 1: for x,", "baptistelafoux \"\"\" import domino import numpy as np import numpy.lib.arraysetops", "= center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X = center_pts[:, 0] Y", "Y = center_pts[:,1] if only_new_blocks: idx = (np.abs(X) + np.abs(Y)", "only_new_blocks = True): grid_X, grid_Y = np.meshgrid(np.arange(2 * order) -", "tile in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] =", "@author: baptistelafoux \"\"\" import domino import numpy as np import", "np.array([x, y + 1]), np.array([-1,0])) d2 = domino.domino(np.array([x + 1,", "for (x,y) in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts)", "Y_aztec): grid[x, y] = False return grid def enlarge_grid(grid, order):", "\"\"\" Created on Wed Dec 30 22:04:48 2020 @author: baptistelafoux", "should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if should_create_a_block: grid", "<filename>shuffling_algorithm.py #!/usr/bin/env python3 # -*- coding: utf-8 -*- \"\"\" Created", "on Wed Dec 30 22:04:48 2020 @author: baptistelafoux \"\"\" import", "order return X[idx], Y[idx] def add_to_grid(tiles, grid): for tile in", "np import numpy.lib.arraysetops as aso def spawn_block(x, y): if np.random.rand()", "for coord in temp_grid: grid[coord] = temp_grid[coord] return grid def", "zip(X,Y): try: if ~grid[x, y]: idx = [(x,y), (x+1,y), (x,y+1),", "np.random.rand() > 0.5: d1 = domino.domino(np.array([x, y]), np.array([x + 1,", "order - 1)/2 , np.arange(2 * order) - (2 *", "grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid)", "center_pts_aztec = [tuple([x,y]) 
for (x,y) in zip(X_aztec, Y_aztec)] diff_array =", "np.abs(X) + np.abs(Y) <= order return X[idx], Y[idx] def add_to_grid(tiles,", "y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y + 1]), np.array([x +", "- (2 * order - 1)/2 , np.arange(2 * order)", "tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0], tile.pt2[1]] = tile return", "x, y in list(diff_array): grid[x, y] = False else: for", "domino.domino(np.array([x, y]), np.array([x, y + 1]), np.array([-1,0])) d2 = domino.domino(np.array([x", "grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order -", "grid def generate_good_block(grid): center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1],", "= tile return grid def generate_good_block(grid): center_pts = np.array([*grid]) center_pts", "X[idx], Y[idx] def add_to_grid(tiles, grid): for tile in tiles: grid[tile.pt1[0],", "if ~grid[x, y]: idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try:", "[*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y)", "y]: idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block =", "return grid def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X = center_pts[:,", "(x,y) in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts) if", "grid = add_to_grid(spawn_block(x, y), grid) except: pass except: pass return", "grid def move_tiles(grid, curr_order): temp_grid = {} for coord in", "y] = False else: for (x,y) in zip(X_aztec, Y_aztec): grid[x,", "domino import numpy as np import numpy.lib.arraysetops as aso def", "grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False grid[x2,", "if grid[coord] != False: x1, y1 = grid[coord].pt1 x2, y2", "grid[tile.pt2[0], tile.pt2[1]] = tile return grid def generate_good_block(grid): center_pts =", "= np.abs(X) + np.abs(Y) <= order return X[idx], Y[idx] def", "Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec,", 
"next_y] != False): if all(grid[next_x, next_y].v == - grid[x, y].v):", "y + 1]), np.array([x + 1, y + 1]), np.array([0,", "next_y].v == - grid[x, y].v): grid[x, y ] = False", "= False for coord in temp_grid: grid[coord] = temp_grid[coord] return", "1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y + 1]), np.array([x", "<= order return X[idx], Y[idx] def add_to_grid(tiles, grid): for tile", "- 1)/2 , np.arange(2 * order) - (2 * order", "temp_grid) grid[x1, y1] = False grid[x2, y2] = False for", "tile.pt2[1]] = tile return grid def generate_good_block(grid): center_pts = np.array([*grid])", "+ 1, y]), np.array([x + 1, y + 1]), np.array([", "pass except: pass return grid def enlarge_grid_deprec(grid, order): center_pts =", "if (grid[next_x, next_y] != False): if all(grid[next_x, next_y].v == -", "idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block = ~np.sum(np.array(list(map(grid.get,", "idx = (np.abs(X) + np.abs(Y) <= order) & (np.abs(X) +", "= grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, y1] =", "in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts) if order", "order - 1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1],", "np.array([*grid]) X = center_pts[:, 0] Y = center_pts[:, 1] for", "zip(X,Y): try: next_x, next_y = np.array([x, y]) + grid[x, y].v", "Y = center_pts[:, 1] for (x,y) in zip(X,Y): try: if", "np.array([x, y]) + grid[x, y].v if (grid[next_x, next_y] != False):", "def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X = center_pts[:, 0] Y", "return X[idx], Y[idx] def add_to_grid(tiles, grid): for tile in tiles:", "1, y + 1]), np.array([0, 1])) else: d1 = domino.domino(np.array([x,", "+ 1]), np.array([ 1,0])) return [d1, d2] def aztec_grid(order, only_new_blocks", "np.array([ 1,0])) return [d1, d2] def aztec_grid(order, only_new_blocks = True):", "y), grid) except: pass 
except: pass return grid def enlarge_grid_deprec(grid,", "+ 1, y + 1]), np.array([0, 1])) else: d1 =", "center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y = center_pts[:,1] if only_new_blocks:", "(x,y) in zip(X,Y): try: next_x, next_y = np.array([x, y]) +", "temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts = np.array([*grid]) X =", "* order - 1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts =", "add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False grid[x2, y2] = False", "= False else: for (x,y) in zip(X_aztec, Y_aztec): grid[x, y]", "y + 1]), np.array([0, 1])) else: d1 = domino.domino(np.array([x, y]),", "in zip(X,Y): try: if ~grid[x, y]: idx = [(x,y), (x+1,y),", "[d1, d2] def aztec_grid(order, only_new_blocks = True): grid_X, grid_Y =", "for (x,y) in zip(X,Y): try: next_x, next_y = np.array([x, y])", "np.abs(Y) <= order) & (np.abs(X) + np.abs(Y) > order -", "aztec_grid(order) center_pts_aztec = [tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)] diff_array", "for coord in grid: if grid[coord] != False: x1, y1", "y1] = False grid[x2, y2] = False for coord in", ", np.arange(2 * order) - (2 * order - 1)/2)", "y + 1]), np.array([ 1,0])) return [d1, d2] def aztec_grid(order,", "try: should_create_a_block = ~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if should_create_a_block:", "grid[x, y].v): grid[x, y ] = False grid[next_x, next_y] =", "spawn_block(x, y): if np.random.rand() > 0.5: d1 = domino.domino(np.array([x, y]),", "destroy_bad_blocks(grid): center_pts = np.array([*grid]) X = center_pts[:, 0] Y =", "if only_new_blocks: idx = (np.abs(X) + np.abs(Y) <= order) &", "= tile grid[tile.pt2[0], tile.pt2[1]] = tile return grid def generate_good_block(grid):", "except: pass return grid def enlarge_grid_deprec(grid, order): center_pts = [*grid]", "center_pts = np.array([*grid]) center_pts = center_pts[np.lexsort((center_pts[:, 1], center_pts[:, 0]))] X", "= 
np.array([grid_X.flatten(), grid_Y.flatten()]).T center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0]", "1: for x, y in list(diff_array): grid[x, y] = False", "= np.array([*grid]) X = center_pts[:, 0] Y = center_pts[:, 1]", "zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts) if order >", "domino.domino(np.array([x, y]), np.array([x + 1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x,", "X = center_pts[:, 0] Y = center_pts[:, 1] for (x,y)", "y2] = False for coord in temp_grid: grid[coord] = temp_grid[coord]", "enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order, True) for (x,y) in", "python3 # -*- coding: utf-8 -*- \"\"\" Created on Wed", "= False return grid def move_tiles(grid, curr_order): temp_grid = {}", "= {} for coord in grid: if grid[coord] != False:", "x1, y1 = grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid", "1, y + 1]), np.array([ 1,0])) return [d1, d2] def", "~grid[x, y]: idx = [(x,y), (x+1,y), (x,y+1), (x+1,y+1)] try: should_create_a_block", "grid): for tile in tiles: grid[tile.pt1[0], tile.pt1[1]] = tile grid[tile.pt2[0],", "grid[x, y] = False else: for (x,y) in zip(X_aztec, Y_aztec):", "- 1) else: idx = np.abs(X) + np.abs(Y) <= order", "return grid def enlarge_grid_deprec(grid, order): center_pts = [*grid] X_aztec, Y_aztec", "order): center_pts = [*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec =", "X = center_pts[:,0] Y = center_pts[:,1] if only_new_blocks: idx =", "[tuple([x,y]) for (x,y) in zip(X_aztec, Y_aztec)] diff_array = set(center_pts_aztec) -", "(np.abs(X) + np.abs(Y) > order - 1) else: idx =", "tile grid[tile.pt2[0], tile.pt2[1]] = tile return grid def generate_good_block(grid): center_pts", "1) else: idx = np.abs(X) + np.abs(Y) <= order return", "Y_aztec)] diff_array = set(center_pts_aztec) - set(center_pts) if order > 1:", "y2 = grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, 
y1]", "np.array([x + 1, y + 1]), np.array([ 1,0])) return [d1,", "= bool) if should_create_a_block: grid = add_to_grid(spawn_block(x, y), grid) except:", "grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]], temp_grid) grid[x1, y1] = False", "grid[coord] = temp_grid[coord] return grid def destroy_bad_blocks(grid): center_pts = np.array([*grid])", "1]), np.array([ 1,0])) return [d1, d2] def aztec_grid(order, only_new_blocks =", "1]), np.array([-1,0])) d2 = domino.domino(np.array([x + 1, y]), np.array([x +", "center_pts = center_pts[np.lexsort((center_pts[:,1], center_pts[:,0]))] X = center_pts[:,0] Y = center_pts[:,1]", "= grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move() temp_grid = add_to_grid([grid[coord]],", "grid) except: pass except: pass return grid def enlarge_grid_deprec(grid, order):", "order) & (np.abs(X) + np.abs(Y) > order - 1) else:", "except: pass except: pass return grid def enlarge_grid_deprec(grid, order): center_pts", "order): X_aztec, Y_aztec = aztec_grid(order, True) for (x,y) in zip(X_aztec,", "* order) - (2 * order - 1)/2) center_pts =", "= [*grid] X_aztec, Y_aztec = aztec_grid(order) center_pts_aztec = [tuple([x,y]) for", "= True): grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2", "grid_X, grid_Y = np.meshgrid(np.arange(2 * order) - (2 * order", "> order - 1) else: idx = np.abs(X) + np.abs(Y)", "0] Y = center_pts[:, 1] for (x,y) in zip(X,Y): try:", "numpy as np import numpy.lib.arraysetops as aso def spawn_block(x, y):", "+ 1]), np.array([x + 1, y + 1]), np.array([0, 1]))", "set(center_pts_aztec) - set(center_pts) if order > 1: for x, y", "+ 1, y]), np.array([0,-1])) d2 = domino.domino(np.array([x, y + 1]),", "order > 1: for x, y in list(diff_array): grid[x, y]", "+ 1, y + 1]), np.array([ 1,0])) return [d1, d2]", "def spawn_block(x, y): if np.random.rand() > 0.5: d1 = domino.domino(np.array([x,", "~np.sum(np.array(list(map(grid.get, idx))), dtype = bool) if should_create_a_block: grid = 
add_to_grid(spawn_block(x,", "- (2 * order - 1)/2) center_pts = np.array([grid_X.flatten(), grid_Y.flatten()]).T", "False: x1, y1 = grid[coord].pt1 x2, y2 = grid[coord].pt2 grid[coord].move()", "+ 1]), np.array([-1,0])) d2 = domino.domino(np.array([x + 1, y]), np.array([x", "grid[x1, y1] = False grid[x2, y2] = False for coord", "+ 1]), np.array([0, 1])) else: d1 = domino.domino(np.array([x, y]), np.array([x,", "] = False grid[next_x, next_y] = False except: pass return", "+ np.abs(Y) <= order return X[idx], Y[idx] def add_to_grid(tiles, grid):", "grid def enlarge_grid(grid, order): X_aztec, Y_aztec = aztec_grid(order, True) for" ]
[ "get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return np.sqrt((x - cell_grid_x) ** 2", "def get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return np.sqrt((x - cell_grid_x) **", "numpy as np def get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix):", "cell_grid_y, x, y): return np.sqrt((x - cell_grid_x) ** 2 +", "(x - cell_grid_x) ** 2 + (y - cell_grid_y) **", "def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y, x, y):", "return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return np.sqrt((x", "cell_grid_x) ** 2 + (y - cell_grid_y) ** 2) def", "cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return (x", "x, y): return np.sqrt((x - cell_grid_x) ** 2 + (y", "x, y): return (x - cell_grid_x) ** 2 + (y", "2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return (x - cell_grid_x)", "cell_grid_y, x, y): return (x - cell_grid_x) ** 2 +", "** 2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return (x -", "np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y) **", "np def get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix),", "def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return (x - cell_grid_x) **", "matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y, x,", "** 2 + (y - cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x,", "matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return np.sqrt((x - cell_grid_x)", "2 + (y - cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x, 
cell_grid_y,", "get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def", "return (x - cell_grid_x) ** 2 + (y - cell_grid_y)", "return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x,", "- cell_grid_x) ** 2 + (y - cell_grid_y) ** 2)", "- cell_grid_x) ** 2 + (y - cell_grid_y) ** 2", "+ (y - cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x,", "def get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape)", "import numpy as np def get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def", "y): return (x - cell_grid_x) ** 2 + (y -", "get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return", "- cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return", "(y - cell_grid_y) ** 2) def get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y):", "y): return np.sqrt((x - cell_grid_x) ** 2 + (y -", "np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return np.unravel_index(np.nanargmax(matrix), matrix.shape) def get_distance_matrix(cell_grid_x, cell_grid_y,", "get_distance_matrix_squared(cell_grid_x, cell_grid_y, x, y): return (x - cell_grid_x) ** 2", "return np.sqrt((x - cell_grid_x) ** 2 + (y - cell_grid_y)", "as np def get_position_of_minimum(matrix): return np.unravel_index(np.nanargmin(matrix), matrix.shape) def get_position_of_maximum(matrix): return", "np.unravel_index(np.nanargmax(matrix), matrix.shape) def 
get_distance_matrix(cell_grid_x, cell_grid_y, x, y): return np.sqrt((x -" ]
[ "distutils.core import setup from Cython.Build import cythonize setup(ext_modules = cythonize([\"license_chk.py\"]))", "<gh_stars>0 from distutils.core import setup from Cython.Build import cythonize setup(ext_modules", "from distutils.core import setup from Cython.Build import cythonize setup(ext_modules =" ]
[ "# Copyright 2013 Nicira, Inc. # All Rights Reserved #", "# Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post':", "= 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True,", "= _(\"Invalid bandwidth rate, %(data)s must be a non negative\"", "_(\"Invalid bandwidth rate, %(data)s must be a non negative\" \"", "qos_queue_create = \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list", "delete_qos_queue(self, context, id): pass @abstractmethod def get_qos_queue(self, context, id, fields=None):", "'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR:", "True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''},", "ValueError except (ValueError, TypeError): msg = _(\"'%s' must be a", "quantum.api.v2 import attributes as attr from quantum.api.v2 import base from", "_(\"Need to be admin in order to create queue called", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default':", "specific language governing permissions and limitations # under the License.", "True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False,", "1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True,", "'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible':", "# not use this file except in compliance with the", "message = _(\"Invalid bandwidth rate, min greater than max.\") class", "'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post':", "'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 
'is_visible':", "max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, %(data)s must", "True, 'default': False}} } class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod", "tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. # All", "\"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller = base.create_resource(collection_name,", "id): pass @abstractmethod def get_qos_queue(self, context, id, fields=None): pass @abstractmethod", "_(\"Queue %(id)s does not exist\") class QueueInUseByPort(qexception.InUse): message = _(\"Unable", "in compliance with the License. You may obtain # a", "False, 'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False,", "policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get = \"get_qos_queue\"", "port.\") class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is not associated with", "You may obtain # a copy of the License at", "to delete queue attached to port.\") class QueuePortBindingNotFound(qexception.NotFound): message =", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "TypeError): msg = _(\"'%s' must be a non negative integer.\")", "the License. # # @author: <NAME>, Nicira Networks, Inc. 
from", "%(id)s does not exist\") class QueueInUseByPort(qexception.InUse): message = _(\"Unable to", "message = _(\"Queue %(id)s does not exist\") class QueueInUseByPort(qexception.InUse): message", "= resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\",", "= _(\"'%s' must be a non negative integer.\") % val", "does not exist\") class QueueInUseByPort(qexception.InUse): message = _(\"Unable to delete", "under the License is distributed on an \"AS IS\" BASIS,", "@abstractmethod def get_qos_queue(self, context, id, fields=None): pass @abstractmethod def get_qos_queues(self,", "needed when QoS workload marked trusted\") class QueueNotFound(qexception.NotFound): message =", "'qos_queue' collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name", "message = _(\"Invalid bandwidth rate, %(data)s must be a non", "def convert_to_unsigned_int_or_none(val): if val is None: return try: val =", "Inc. # All Rights Reserved # # Licensed under the", "'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to':", "val = int(val) if val < 0: raise ValueError except", "qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to be", "allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self,", "= { 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible':", "class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, %(data)s must be", "True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True},", "DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message", "message = _(\"Invalid value for dscp %(data)s must be integer.\")", "= 
_(\"No DSCP field needed when QoS workload marked trusted\")", "this file except in compliance with the License. You may", "class QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, min greater than", "is not associated with lqueue\") def convert_to_unsigned_int_or_none(val): if val is", "False, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post':", "qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues':", "'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put':", "} class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod def get_name(cls): return", "'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default':", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "if version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return", "{} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue): pass @abstractmethod", "_(\"Default queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value", "return {} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue): pass", "EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put': False,", "get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def", "extension.\"\"\" @classmethod def get_name(cls): return \"nvp-qos\" @classmethod def get_alias(cls): return", "def delete_qos_queue(self, context, id): pass @abstractmethod def get_qos_queue(self, context, id,", "class 
QueueInUseByPort(qexception.InUse): message = _(\"Unable to delete queue attached to", "under the License. # # @author: <NAME>, Nicira Networks, Inc.", "file except in compliance with the License. You may obtain", "= 'qos_queue' collection_name = resource_name.replace('_', '-') + \"s\" params =", "quantum.api import extensions from quantum.api.v2 import attributes as attr from", "manager # For policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\"", "dscp %(data)s must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid", "'validate': {'type:string': None}, 'is_visible': True}, }, } QUEUE = 'queue_id'", "'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default':", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "bandwidth rate, min greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message =", "under the Apache License, Version 2.0 (the \"License\"); you may", "'0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True,", "convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None,", "'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default':", "exists.\") class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value for dscp %(data)s", "attached to port.\") class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is not", "RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context,", "exts.append(ex) return exts def get_extended_resources(self, version): if version == \"2.0\":", "a non negative integer.\") % val raise 
qexception.InvalidInput(error_message=msg) return val", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "{'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default':", "\"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to be admin in", "extension.\" @classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "bandwidth rate, %(data)s must be a non negative\" \" integer.\")", "QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, min greater than max.\")", "to in writing, software # distributed under the License is", "@classmethod def get_alias(cls): return \"nvp-qos\" @classmethod def get_description(cls): return \"NVP", "value for dscp %(data)s must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message", "True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string':", "= 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': {", "'allow_put': False, 'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post':", "get_alias(cls): return \"nvp-qos\" @classmethod def get_description(cls): return \"NVP QoS extension.\"", "from quantum.common import exceptions as qexception from quantum import manager", "_(\"Invalid bandwidth rate, min greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message", "or agreed to in writing, software # distributed under the", "required by applicable law or agreed to in writing, software", "not associated with lqueue\") def convert_to_unsigned_int_or_none(val): if val is None:", "'default': '0', 'convert_to': 
convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy':", "import attributes as attr from quantum.api.v2 import base from quantum.common", "'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put':", "\"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts = []", "QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, %(data)s must be a", "Networks, Inc. from abc import abstractmethod from quantum.api import extensions", "\"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message", "Apache License, Version 2.0 (the \"License\"); you may # not", "called default\") class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue already exists.\")", "for dscp %(data)s must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message =", "def get_extended_resources(self, version): if version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() +", "'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True,", "Queue extension.\"\"\" @classmethod def get_name(cls): return \"nvp-qos\" @classmethod def get_alias(cls):", "agreed to in writing, software # distributed under the License", "integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP field needed when", "False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True,", "plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts", "min greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth", "distributed under the License is distributed on an \"AS IS\"", "{ 
'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post':", "pass @abstractmethod def delete_qos_queue(self, context, id): pass @abstractmethod def get_qos_queue(self,", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. See the", "plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-')", "'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post':", "return \"NVP QoS extension.\" @classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod", "'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put': False,", "than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, %(data)s", "'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted',", "QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s does not exist\") class QueueInUseByPort(qexception.InUse):", "extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if version", "= \"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need", "not use this file except in compliance with the License.", "< 0: raise ValueError except (ValueError, TypeError): msg = _(\"'%s'", "QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value for dscp %(data)s must be", "writing, software # distributed under the License is distributed on", "True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max':", "= _(\"Queue %(id)s does not exist\") class 
QueueInUseByPort(qexception.InUse): message =", "attributes as attr from quantum.api.v2 import base from quantum.common import", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "context, id, fields=None): pass @abstractmethod def get_qos_queues(self, context, filters=None, fields=None):", "id, fields=None): pass @abstractmethod def get_qos_queues(self, context, filters=None, fields=None): pass", "def get_name(cls): return \"nvp-qos\" @classmethod def get_alias(cls): return \"nvp-qos\" @classmethod", "= _(\"Invalid value for dscp %(data)s must be integer.\") class", "_(\"Unable to delete queue attached to port.\") class QueuePortBindingNotFound(qexception.NotFound): message", "'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put':", "the License. You may obtain # a copy of the", "field needed when QoS workload marked trusted\") class QueueNotFound(qexception.NotFound): message", "True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True,", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "rate, %(data)s must be a non negative\" \" integer.\") class", "use this file except in compliance with the License. 
You", "qos_queue_get = \"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message =", "def get_description(cls): return \"NVP QoS extension.\" @classmethod def get_namespace(cls): return", "RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller = base.create_resource(collection_name, resource_name, plugin, params,", "quantum.common import exceptions as qexception from quantum import manager #", "lqueue\") def convert_to_unsigned_int_or_none(val): if val is None: return try: val", "{'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put':", "'max': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to':", "= _(\"Need to be admin in order to create queue", "in order to create queue called default\") class DefaultQueueAlreadyExists(qexception.InUse): message", "{'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none},", "= [] plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name =", "'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True,", "if val < 0: raise ValueError except (ValueError, TypeError): msg", "False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True,", "# under the License. 
# # @author: <NAME>, Nicira Networks,", "= _(\"Default queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid", "message = _(\"Port is not associated with lqueue\") def convert_to_unsigned_int_or_none(val):", "convert_to_unsigned_int_or_none(val): if val is None: return try: val = int(val)", "}, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 =", "int(val) if val < 0: raise ValueError except (ValueError, TypeError):", "return val # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': {", "False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean,", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "workload marked trusted\") class QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s does", "rate, min greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid", "def create_qos_queue(self, context, queue): pass @abstractmethod def delete_qos_queue(self, context, id):", "raise ValueError except (ValueError, TypeError): msg = _(\"'%s' must be", "\"License\"); you may # not use this file except in", "msg = _(\"'%s' must be a non negative integer.\") %", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object): @abstractmethod def", "express or implied. See the # License for the specific", "Nicira, Inc. 
# All Rights Reserved # # Licensed under", "'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put':", "the Apache License, Version 2.0 (the \"License\"); you may #", "{'type:string': None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put':", "return exts def get_extended_resources(self, version): if version == \"2.0\": return", "'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post':", "''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0',", "} QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = {", "version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {}", "not exist\") class QueueInUseByPort(qexception.InUse): message = _(\"Unable to delete queue", "See the # License for the specific language governing permissions", "trusted\") class QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s does not exist\")", "None}, 'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR =", "'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True,", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "version): if version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else:", "{ 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True},", "%(data)s must be a non negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput):", "False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True,", "else: return {} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue):", "attr from quantum.api.v2 import base from quantum.common import exceptions as", "law or agreed to in 
writing, software # distributed under", "MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP field needed when QoS workload", "message = _(\"Unable to delete queue attached to port.\") class", "{QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False}} }", "False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp':", "= _(\"Invalid bandwidth rate, min greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput):", "message = _(\"No DSCP field needed when QoS workload marked", "queue): pass @abstractmethod def delete_qos_queue(self, context, id): pass @abstractmethod def", "'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible':", "implied. See the # License for the specific language governing", "\"\"\"Returns Ext Resources.\"\"\" exts = [] plugin = manager.QuantumManager.get_plugin() resource_name", "License. # # @author: <NAME>, Nicira Networks, Inc. from abc", "Inc. 
from abc import abstractmethod from quantum.api import extensions from", "\"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to", "'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False}}", "# vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc.", "'min': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to':", "already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value for dscp", "True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False,", "from quantum import manager # For policy.json/Auth qos_queue_create = \"create_qos_queue\"", "+ RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object): @abstractmethod def create_qos_queue(self,", "'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True,", "def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts = [] plugin =", "queue attached to port.\") class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is", "to be admin in order to create queue called default\")", "[] plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_',", "qos_queue_delete = \"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class", "get_qos_queue(self, context, id, fields=None): pass @abstractmethod def get_qos_queues(self, context, filters=None,", "integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, min greater", "'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible': True}, 'default':", "collection_name = resource_name.replace('_', '-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name +", "'ports': { 
RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': False, 'default':", "{ 'ports': { RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': False,", "'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate':", "raise qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP = {", "Ext Resources.\"\"\" exts = [] plugin = manager.QuantumManager.get_plugin() resource_name =", "abstractmethod from quantum.api import extensions from quantum.api.v2 import attributes as", "context, id): pass @abstractmethod def get_qos_queue(self, context, id, fields=None): pass", "to port.\") class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is not associated", "'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put':", "def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\"", "{'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none},", "val raise qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP =", "class QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue): pass @abstractmethod def", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "be admin in order to create queue called default\") class", "= _(\"Unable to delete queue attached to port.\") class QueuePortBindingNotFound(qexception.NotFound):", "# # Licensed under the Apache License, Version 2.0 (the", "= _(\"Port is not associated with lqueue\") def convert_to_unsigned_int_or_none(val): if", "'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 
'max': {'allow_post':", "'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False, 'convert_to':", "is None: return try: val = int(val) if val <", "base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex)", "queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value for", "resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return", "associated with lqueue\") def convert_to_unsigned_int_or_none(val): if val is None: return", "obtain # a copy of the License at # #", "Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False, 'allow_put':", "Version 2.0 (the \"License\"); you may # not use this", "QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is not associated with lqueue\") def", "'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post':", "message = _(\"Need to be admin in order to create", "= \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to be admin", "'allow_put': False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True,", "convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']},", "['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True, 'allow_put':", "be a non negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message =", "License for the specific language governing permissions and limitations #", "'allow_put': True, 'is_visible': True, 'default': False}} } class Nvp_qos(object): \"\"\"Port", "class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod def 
get_name(cls): return \"nvp-qos\"", "Copyright 2013 Nicira, Inc. # All Rights Reserved # #", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None},", "permissions and limitations # under the License. # # @author:", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "be a non negative integer.\") % val raise qexception.InvalidInput(error_message=msg) return", "Resources.\"\"\" exts = [] plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue'", "% val raise qexception.InvalidInput(error_message=msg) return val # Attribute Map RESOURCE_ATTRIBUTE_MAP", "True}, }, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0", "default\") class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue already exists.\") class", "\"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list = \"get_qos_queues\"", "# For policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get", "marked trusted\") class QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s does not", "convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False}},", "QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports':", "extensions from quantum.api.v2 import attributes as attr from quantum.api.v2 import", "QoS extension.\" @classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls):", "delete queue attached to port.\") class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "resource_name.replace('_', '-') + \"s\" params = 
RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict())", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller)", "= RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller = base.create_resource(collection_name, resource_name, plugin,", "controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name,", "@abstractmethod def create_qos_queue(self, context, queue): pass @abstractmethod def delete_qos_queue(self, context,", "from quantum.api.v2 import base from quantum.common import exceptions as qexception", "'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, } QUEUE", "True, 'allow_put': False, 'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking':", "+ \"s\", dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False)", "compliance with the License. 
You may obtain # a copy", "import exceptions as qexception from quantum import manager # For", "False}} } class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod def get_name(cls):", "\"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns", "False, 'allow_put': False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post':", "admin in order to create queue called default\") class DefaultQueueAlreadyExists(qexception.InUse):", "True, 'allow_put': False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id':", "{'allow_post': True, 'allow_put': True, 'is_visible': True, 'default': False}} } class", "as attr from quantum.api.v2 import base from quantum.common import exceptions", "2013 Nicira, Inc. # All Rights Reserved # # Licensed", "{'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted',", "\"NVP QoS extension.\" @classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def", "= manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-') +", "'is_visible': True, 'default': False}} } class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\"", "val is None: return try: val = int(val) if val", "val < 0: raise ValueError except (ValueError, TypeError): msg =", "{'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE:", "the # License for the specific language governing permissions and", "False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False,", "# # Unless required by applicable law or agreed to", "Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod def get_name(cls): return \"nvp-qos\" 
@classmethod", "return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls):", "create queue called default\") class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue", "class QueueInvalidDscp(qexception.InvalidInput): message = _(\"Invalid value for dscp %(data)s must", "exts def get_extended_resources(self, version): if version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items()", "'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default':", "== \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class", "vim: tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. #", "_(\"Port is not associated with lqueue\") def convert_to_unsigned_int_or_none(val): if val", "from quantum.api import extensions from quantum.api.v2 import attributes as attr", "base from quantum.common import exceptions as qexception from quantum import", "attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False,", "import manager # For policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete =", "resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-') + \"s\" params", "True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible':", "0: raise ValueError except (ValueError, TypeError): msg = _(\"'%s' must", "integer.\") % val raise qexception.InvalidInput(error_message=msg) return val # Attribute Map", "2.0 (the \"License\"); you may # not use this file", "<NAME>, Nicira Networks, Inc. 
from abc import abstractmethod from quantum.api", "'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible':", "params, allow_bulk=False) ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def", "{ RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': False, 'default': 1,", "class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput):", "= int(val) if val < 0: raise ValueError except (ValueError,", "quantum import manager # For policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete", "get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts", "{'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False},", "greater than max.\") class QueueInvalidBandwidth(qexception.InvalidInput): message = _(\"Invalid bandwidth rate,", "by applicable law or agreed to in writing, software #", "class QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s does not exist\") class", "'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True,", "must be a non negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message", "_(\"Invalid value for dscp %(data)s must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput):", "when QoS workload marked trusted\") class QueueNotFound(qexception.NotFound): message = _(\"Queue", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None},", "# @author: <NAME>, Nicira Networks, Inc. 
from abc import abstractmethod", "'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True, 'default': ''}, 'min':", "try: val = int(val) if val < 0: raise ValueError", "return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts =", "{'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True}, 'dscp': {'allow_post': True,", "(ValueError, TypeError): msg = _(\"'%s' must be a non negative", "@classmethod def get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts = [] plugin", "context, queue): pass @abstractmethod def delete_qos_queue(self, context, id): pass @abstractmethod", "manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name = resource_name.replace('_', '-') + \"s\"", "\"s\", dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex", "quantum.api.v2 import base from quantum.common import exceptions as qexception from", "ex = extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version):", "# # @author: <NAME>, Nicira Networks, Inc. from abc import", "governing permissions and limitations # under the License. # #", "False}, 'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible':", "\"\"\"Port Queue extension.\"\"\" @classmethod def get_name(cls): return \"nvp-qos\" @classmethod def", "'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate':", "may obtain # a copy of the License at #", "'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible': True},", "softtabstop=4 # Copyright 2013 Nicira, Inc. 
# All Rights Reserved", "False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, }, }", "if val is None: return try: val = int(val) if", "@classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod def get_resources(cls): \"\"\"Returns Ext", "'is_visible': True, 'default': None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put':", "True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False, 'is_visible': True,", "Rights Reserved # # Licensed under the Apache License, Version", "Unless required by applicable law or agreed to in writing,", "DSCP field needed when QoS workload marked trusted\") class QueueNotFound(qexception.NotFound):", "message = _(\"Default queue already exists.\") class QueueInvalidDscp(qexception.InvalidInput): message =", "True, 'allow_put': True, 'is_visible': True, 'default': False}} } class Nvp_qos(object):", "\" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP field needed", "\"nvp-qos\" @classmethod def get_alias(cls): return \"nvp-qos\" @classmethod def get_description(cls): return", "return try: val = int(val) if val < 0: raise", "get_description(cls): return \"NVP QoS extension.\" @classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\"", "get_extended_resources(self, version): if version == \"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items())", "{'allow_post': True, 'allow_put': False, 'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none},", "must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid bandwidth rate,", "applicable law or agreed to in writing, software # distributed", "must be a non negative integer.\") % val raise qexception.InvalidInput(error_message=msg)", "%(data)s must be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): 
message = _(\"Invalid bandwidth", "convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string':", "QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible': True, 'default': False}}, 'networks':", "non negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP", "get_name(cls): return \"nvp-qos\" @classmethod def get_alias(cls): return \"nvp-qos\" @classmethod def", "OF ANY KIND, either express or implied. See the #", "'-') + \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "None: return try: val = int(val) if val < 0:", "queue called default\") class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default queue already", "= { 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False, 'is_visible':", "RXTX_FACTOR: {'allow_post': True, 'allow_put': False, 'is_visible': False, 'default': 1, 'convert_to':", "_(\"No DSCP field needed when QoS workload marked trusted\") class", "= extensions.ResourceExtension(collection_name, controller) exts.append(ex) return exts def get_extended_resources(self, version): if", "RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False, 'allow_put': False,", "{'type:string': None}, 'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR", "be integer.\") class QueueMinGreaterMax(qexception.InvalidInput): message = _(\"Invalid bandwidth rate, min", "exist\") class QueueInUseByPort(qexception.InUse): message = _(\"Unable to delete queue attached", "+ \"s\" params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller =", "@classmethod def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\"", "= 
\"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list = \"get_qos_queues\" class DefaultQueueCreateNotAdmin(qexception.InUse):", "return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object): @abstractmethod", "except (ValueError, TypeError): msg = _(\"'%s' must be a non", "'0', 'convert_to': convert_to_unsigned_int_or_none}, 'max': {'allow_post': True, 'allow_put': False, 'is_visible': True,", "None}, 'is_visible': True, 'default': ''}, 'min': {'allow_post': True, 'allow_put': False,", "either express or implied. See the # License for the", "abc import abstractmethod from quantum.api import extensions from quantum.api.v2 import", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "import extensions from quantum.api.v2 import attributes as attr from quantum.api.v2", "may # not use this file except in compliance with", "True, 'is_visible': True, 'default': False}} } class Nvp_qos(object): \"\"\"Port Queue", "non negative integer.\") % val raise qexception.InvalidInput(error_message=msg) return val #", "True, 'allow_put': False, 'is_visible': False, 'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE:", "shiftwidth=4 softtabstop=4 # Copyright 2013 Nicira, Inc. # All Rights", "with the License. 
You may obtain # a copy of", "# License for the specific language governing permissions and limitations", "_(\"'%s' must be a non negative integer.\") % val raise", "return \"nvp-qos\" @classmethod def get_description(cls): return \"NVP QoS extension.\" @classmethod", "# All Rights Reserved # # Licensed under the Apache", "'default': {'allow_post': True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default':", "qexception from quantum import manager # For policy.json/Auth qos_queue_create =", "return \"nvp-qos\" @classmethod def get_alias(cls): return \"nvp-qos\" @classmethod def get_description(cls):", "Reserved # # Licensed under the Apache License, Version 2.0", "you may # not use this file except in compliance", "QoS workload marked trusted\") class QueueNotFound(qexception.NotFound): message = _(\"Queue %(id)s", "@author: <NAME>, Nicira Networks, Inc. from abc import abstractmethod from", "class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP field needed when QoS", "True}, 'dscp': {'allow_post': True, 'allow_put': False, 'is_visible': True, 'default': '0',", "'name': {'allow_post': True, 'allow_put': False, 'validate': {'type:string': None}, 'is_visible': True,", "DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to be admin in order to", "from abc import abstractmethod from quantum.api import extensions from quantum.api.v2", "limitations # under the License. 
# # @author: <NAME>, Nicira", "Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id': {'allow_post': False,", "= \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get = \"get_qos_queue\" qos_queue_list =", "val # Attribute Map RESOURCE_ATTRIBUTE_MAP = { 'qos_queues': { 'id':", "from quantum.api.v2 import attributes as attr from quantum.api.v2 import base", "\"2.0\": return dict(EXTENDED_ATTRIBUTES_2_0.items() + RESOURCE_ATTRIBUTE_MAP.items()) else: return {} class QueuePluginBase(object):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "RXTX_FACTOR = 'rxtx_factor' EXTENDED_ATTRIBUTES_2_0 = { 'ports': { RXTX_FACTOR: {'allow_post':", "language governing permissions and limitations # under the License. #", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "get_resources(cls): \"\"\"Returns Ext Resources.\"\"\" exts = [] plugin = manager.QuantumManager.get_plugin()", "negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No DSCP field", "pass @abstractmethod def get_qos_queue(self, context, id, fields=None): pass @abstractmethod def", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "as qexception from quantum import manager # For policy.json/Auth qos_queue_create", "dict()) controller = base.create_resource(collection_name, resource_name, plugin, params, allow_bulk=False) ex =", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "exts = [] plugin = manager.QuantumManager.get_plugin() resource_name = 'qos_queue' collection_name", "class QueuePortBindingNotFound(qexception.NotFound): message = _(\"Port is not associated with lqueue\")", "'default': 1, 'convert_to': convert_to_unsigned_int_or_none}, QUEUE: {'allow_post': False, 'allow_put': False, 'is_visible':", "negative integer.\") % val raise qexception.InvalidInput(error_message=msg) return val # Attribute", "for the specific language governing permissions 
and limitations # under", "@classmethod def get_name(cls): return \"nvp-qos\" @classmethod def get_alias(cls): return \"nvp-qos\"", "True, 'validate': {'type:string': None}, 'is_visible': True}, }, } QUEUE =", "create_qos_queue(self, context, queue): pass @abstractmethod def delete_qos_queue(self, context, id): pass", "exceptions as qexception from quantum import manager # For policy.json/Auth", "'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible': True}, },", "'is_visible': True}, }, } QUEUE = 'queue_id' RXTX_FACTOR = 'rxtx_factor'", "def get_qos_queue(self, context, id, fields=None): pass @abstractmethod def get_qos_queues(self, context,", "import base from quantum.common import exceptions as qexception from quantum", "except in compliance with the License. You may obtain #", "False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put': True, 'is_visible': True, 'default':", "order to create queue called default\") class DefaultQueueAlreadyExists(qexception.InUse): message =", "False, 'is_visible': True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True,", "All Rights Reserved # # Licensed under the Apache License,", "True, 'allow_put': False, 'validate': {'type:values': ['untrusted', 'trusted']}, 'default': 'untrusted', 'is_visible':", "True, 'default': '0', 'convert_to': convert_to_unsigned_int_or_none}, 'tenant_id': {'allow_post': True, 'allow_put': False,", "a non negative\" \" integer.\") class MissingDSCPForTrusted(qexception.InvalidInput): message = _(\"No", "params = RESOURCE_ATTRIBUTE_MAP.get(resource_name + \"s\", dict()) controller = base.create_resource(collection_name, resource_name,", "License. 
You may obtain # a copy of the License", "QueueInUseByPort(qexception.InUse): message = _(\"Unable to delete queue attached to port.\")", "@classmethod def get_description(cls): return \"NVP QoS extension.\" @classmethod def get_namespace(cls):", "ANY KIND, either express or implied. See the # License", "# distributed under the License is distributed on an \"AS", "True, 'allow_put': False, 'convert_to': attr.convert_to_boolean, 'is_visible': True, 'default': False}, 'name':", "@abstractmethod def delete_qos_queue(self, context, id): pass @abstractmethod def get_qos_queue(self, context,", "# Unless required by applicable law or agreed to in", "None, 'convert_to': convert_to_unsigned_int_or_none}, 'qos_marking': {'allow_post': True, 'allow_put': False, 'validate': {'type:values':", "\"nvp-qos\" @classmethod def get_description(cls): return \"NVP QoS extension.\" @classmethod def", "class DefaultQueueCreateNotAdmin(qexception.InUse): message = _(\"Need to be admin in order", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "'is_visible': True, 'default': False}, 'name': {'allow_post': True, 'allow_put': False, 'validate':", "def get_alias(cls): return \"nvp-qos\" @classmethod def get_description(cls): return \"NVP QoS", "{'allow_post': True, 'allow_put': False, 'required_by_policy': True, 'validate': {'type:string': None}, 'is_visible':", "controller) exts.append(ex) return exts def get_extended_resources(self, version): if version ==", "import abstractmethod from quantum.api import extensions from quantum.api.v2 import attributes", "def get_namespace(cls): return \"http://docs.openstack.org/ext/nvp-qos/api/v2.0\" @classmethod def get_updated(cls): return \"2012-10-05T10:00:00-00:00\" @classmethod", "and limitations # under the License. # # @author: <NAME>,", "Nicira Networks, Inc. 
from abc import abstractmethod from quantum.api import", "with lqueue\") def convert_to_unsigned_int_or_none(val): if val is None: return try:", "to create queue called default\") class DefaultQueueAlreadyExists(qexception.InUse): message = _(\"Default", "False, 'is_visible': True, 'default': False}}, 'networks': {QUEUE: {'allow_post': True, 'allow_put':", "For policy.json/Auth qos_queue_create = \"create_qos_queue\" qos_queue_delete = \"delete_qos_queue\" qos_queue_get =", "QueuePluginBase(object): @abstractmethod def create_qos_queue(self, context, queue): pass @abstractmethod def delete_qos_queue(self,", "'default': False}} } class Nvp_qos(object): \"\"\"Port Queue extension.\"\"\" @classmethod def", "or implied. See the # License for the specific language", "False, 'allow_put': False, 'is_visible': True}, 'default': {'allow_post': True, 'allow_put': False," ]
[ "that you'd ever need for your AI projects, when used", "AI projects, when used alongside Numpy. To suggest more to", "issue on the GitHub repo. \"\"\" from easyneuron.math.distance import euclidean_distance", "you'd ever need for your AI projects, when used alongside", "of the maths tools that you'd ever need for your", "be added, please add an issue on the GitHub repo.", "all of the maths tools that you'd ever need for", "for your AI projects, when used alongside Numpy. To suggest", "used alongside Numpy. To suggest more to be added, please", "added, please add an issue on the GitHub repo. \"\"\"", "\"\"\"easyneuron.math contains all of the maths tools that you'd ever", "Numpy. To suggest more to be added, please add an", "to be added, please add an issue on the GitHub", "tools that you'd ever need for your AI projects, when", "please add an issue on the GitHub repo. \"\"\" from", "need for your AI projects, when used alongside Numpy. To", "when used alongside Numpy. To suggest more to be added,", "more to be added, please add an issue on the", "an issue on the GitHub repo. \"\"\" from easyneuron.math.distance import", "add an issue on the GitHub repo. \"\"\" from easyneuron.math.distance", "alongside Numpy. To suggest more to be added, please add", "ever need for your AI projects, when used alongside Numpy.", "<reponame>TrendingTechnology/easyneuron \"\"\"easyneuron.math contains all of the maths tools that you'd", "projects, when used alongside Numpy. To suggest more to be", "the maths tools that you'd ever need for your AI", "your AI projects, when used alongside Numpy. To suggest more", "maths tools that you'd ever need for your AI projects,", "To suggest more to be added, please add an issue", "suggest more to be added, please add an issue on", "contains all of the maths tools that you'd ever need" ]
[ "workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run_multiple.return_value = [1001, 1002]", "= TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count assert 0", "def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx", "dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run_multiple.return_value = [1001,", "import mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea():", "import pytest try: import unittest.mock as mock except ImportError: import", "obj.open() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert", "mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect =", "1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 1 ==", "1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0,", "== workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count", "assert [0, 1] == obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] ==", "@pytest.fixture() def workingarea(): return mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock()", "== workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close() assert 1 ==", "assert 1 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def test_put(obj,", "sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def", "mock.call(package1)] == workingarea.put_package.call_args_list assert 
[mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list", "== dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count assert 1 ==", "def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0", "dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count assert 0 == workingarea.close.call_count", "repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert", "test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 ==", "assert 1 == workingarea.open.call_count assert 1 == workingarea.close.call_count assert 1", "= [0, 1] # pkgidx dispatcher.run_multiple.return_value = [1001, 1002] #", "assert 0 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close() assert", "dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj)", "dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count", "runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0, 1]", "assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate() assert", "sleep=0.01) assert 0 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert", "<<EMAIL>> import pytest try: import unittest.mock as mock except ImportError:", "[mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list", "assert [mock.call(package0), mock.call(package1)] == 
workingarea.put_package.call_args_list assert [mock.call(workingarea, [0, 1])] ==", "# pkgidx dispatcher.run.side_effect = [1001, 1002] # runid package0 =", "return mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher,", "dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count assert 1 == workingarea.close.call_count", "ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj", "assert 0 == obj.put(package0) assert 1 == obj.put(package1) assert [mock.call(package0),", "1 == workingarea.open.call_count assert 1 == workingarea.close.call_count assert 1 ==", "yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher):", "1 == dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0,", "test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run.side_effect", "1002] # runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert", "ImportError: import mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def", "# runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0,", "from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return mock.MagicMock()", "obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, [0,", "assert 1 == 
workingarea.open.call_count assert 0 == workingarea.close.call_count assert 1", "[0, 1] == obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list", "1] == obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert", "<NAME> <<EMAIL>> import pytest try: import unittest.mock as mock except", "assert 1 == dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count assert", "1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 ==", "= mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert", "mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0, package1])", "1] # pkgidx dispatcher.run_multiple.return_value = [1001, 1002] # runid package0", "[mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] ==", "mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, [0, 1])] == dispatcher.run_multiple.call_args_list ##__________________________________________________________________||", "ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj =", "workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open()", "1 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def test_put(obj, workingarea,", "workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate()", "0 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0 ==", "assert 
[mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea, 1)]", "mock except ImportError: import mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________||", "mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher):", "dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count", "= [1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1 =", "def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher,", "assert 0 == dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count assert", "== dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1]", "dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] #", "dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close()", "alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return mock.MagicMock() @pytest.fixture()", "TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj):", "= TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________|| def", "obj(workingarea, dispatcher): ret = 
TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret", "workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj,", "[1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1')", "[mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher):", "assert 0 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0", "0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect", "== workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open() assert 1 ==", "runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0 ==", "== workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate() assert 1 ==", "0 == dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count assert 0", "except ImportError: import mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture()", "##__________________________________________________________________|| @pytest.fixture() def workingarea(): return mock.MagicMock() @pytest.fixture() def dispatcher(): return", "== obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea,", "ret.open() yield ret ret.close() ##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea,", "== dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): 
workingarea.put_package.side_effect = [0, 1]", "assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def test_put_multiple(obj, workingarea,", "== workingarea.open.call_count assert 0 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count", "dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count", "0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate() assert 1", "== obj.put(package0) assert 1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)] ==", "workingarea.open.call_count assert 1 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def", "return mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture() def obj(workingarea,", "mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert 1 == obj.put(package1) assert", "== workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher):", "assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert 0", "def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx", "workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count", "0 == obj.put(package0) assert 1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)]", "package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0, 1] ==", "package1 = mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0, package1]) assert", "def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield", "workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx 
dispatcher.run_multiple.return_value =", "workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run.side_effect =", "1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea,", "1 == dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count assert 1", "1] # pkgidx dispatcher.run.side_effect = [1001, 1002] # runid package0", "dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] #", "== workingarea.open.call_count assert 1 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count", "== dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count assert 0 ==", "= mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0,", "= [0, 1] # pkgidx dispatcher.run.side_effect = [1001, 1002] #", "import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return mock.MagicMock() @pytest.fixture() def", "0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open() assert 1", "obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea,", "workingarea.open.call_count assert 0 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close()", "package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0 == obj.put(package0)", "= mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert 1 == obj.put(package1)", "pkgidx dispatcher.run_multiple.return_value = [1001, 1002] # runid package0 = mock.MagicMock(name='package0')", "package1]) assert [mock.call(package0), mock.call(package1)] == 
workingarea.put_package.call_args_list assert [mock.call(workingarea, [0, 1])]", "dispatcher.run.side_effect = [1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1", "workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close() assert 1 == workingarea.open.call_count", "# <NAME> <<EMAIL>> import pytest try: import unittest.mock as mock", "mock from alphatwirl.concurrently import TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return", "mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0, package1]) assert [mock.call(package0), mock.call(package1)]", "# pkgidx dispatcher.run_multiple.return_value = [1001, 1002] # runid package0 =", "= mock.MagicMock(name='package1') assert [0, 1] == obj.put_multiple([package0, package1]) assert [mock.call(package0),", "ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open() yield ret ret.close() ##__________________________________________________________________||", "TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count assert 0 ==", "workingarea.close.call_count assert 1 == dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect", "def workingarea(): return mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture()", "mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert 1", "0 == dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count assert 0", "@pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret", "TaskPackageDropbox ##__________________________________________________________________|| @pytest.fixture() def workingarea(): return 
mock.MagicMock() @pytest.fixture() def dispatcher():", "test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)", "##__________________________________________________________________|| def test_repr(obj): repr(obj) def test_open_terminate_close(workingarea, dispatcher): obj = TaskPackageDropbox(workingArea=workingarea,", "== workingarea.put_package.call_args_list assert [mock.call(workingarea, 0), mock.call(workingarea, 1)] == dispatcher.run.call_args_list def", "workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open() assert 1 == workingarea.open.call_count", "== obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert [mock.call(workingarea, 0),", "package1 = mock.MagicMock(name='package1') assert 0 == obj.put(package0) assert 1 ==", "unittest.mock as mock except ImportError: import mock from alphatwirl.concurrently import", "as mock except ImportError: import mock from alphatwirl.concurrently import TaskPackageDropbox", "obj.close() assert 1 == workingarea.open.call_count assert 1 == workingarea.close.call_count assert", "pkgidx dispatcher.run.side_effect = [1001, 1002] # runid package0 = mock.MagicMock(name='package0')", "assert 1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list assert", "dispatcher.run_multiple.return_value = [1001, 1002] # runid package0 = mock.MagicMock(name='package0') package1", "dispatcher(): return mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea,", "try: import unittest.mock as mock except ImportError: import mock from", "workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run.side_effect = [1001, 1002]", "test_put_multiple(obj, workingarea, dispatcher): workingarea.put_package.side_effect = [0, 1] 
# pkgidx dispatcher.run_multiple.return_value", "mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01)", "assert 0 == workingarea.close.call_count assert 0 == dispatcher.terminate.call_count obj.open() assert", "0 == workingarea.close.call_count assert 1 == dispatcher.terminate.call_count obj.close() assert 1", "# runid package0 = mock.MagicMock(name='package0') package1 = mock.MagicMock(name='package1') assert 0", "obj.put(package0) assert 1 == obj.put(package1) assert [mock.call(package0), mock.call(package1)] == workingarea.put_package.call_args_list", "[0, 1] # pkgidx dispatcher.run.side_effect = [1001, 1002] # runid", "obj.terminate() assert 1 == workingarea.open.call_count assert 0 == workingarea.close.call_count assert", "pytest try: import unittest.mock as mock except ImportError: import mock", "@pytest.fixture() def obj(workingarea, dispatcher): ret = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) ret.open()", "assert 1 == dispatcher.terminate.call_count def test_put(obj, workingarea, dispatcher): workingarea.put_package.side_effect =", "workingarea(): return mock.MagicMock() @pytest.fixture() def dispatcher(): return mock.MagicMock() @pytest.fixture() def", "== dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count assert 0 ==", "dispatcher): workingarea.put_package.side_effect = [0, 1] # pkgidx dispatcher.run.side_effect = [1001,", "<reponame>shane-breeze/AlphaTwirl # <NAME> <<EMAIL>> import pytest try: import unittest.mock as", "obj = TaskPackageDropbox(workingArea=workingarea, dispatcher=dispatcher, sleep=0.01) assert 0 == workingarea.open.call_count assert", "import unittest.mock as mock except ImportError: import mock from alphatwirl.concurrently", "assert 0 == dispatcher.terminate.call_count obj.terminate() assert 1 == workingarea.open.call_count assert", "def dispatcher(): return 
mock.MagicMock() @pytest.fixture() def obj(workingarea, dispatcher): ret =", "[0, 1] # pkgidx dispatcher.run_multiple.return_value = [1001, 1002] # runid" ]
[ "= self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id)", "networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "may obtain # a copy of the License at #", "# All Rights Reserved. # # Licensed under the Apache", "# # Licensed under the Apache License, Version 2.0 (the", "All Rights Reserved. # # Licensed under the Apache License,", "agreed to in writing, software # distributed under the License", "Unless required by applicable law or agreed to in writing,", "self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self):", "= plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports = {'port': port}", "self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports =", "distributed under the License is distributed on an \"AS IS\"", "subnet_context = data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'],", "self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24',", "odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNone(port_id)", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or 
implied. See the", "data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips'] = []", "cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self):", "mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'], data['context'],", "obtain # a copy of the License at # #", "self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True)", "applicable law or agreed to in writing, software # distributed", "License. import testscenarios from networking_odl.common import constants as odl_const from", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase,", "testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service',", "subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id'])", "super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech = 
mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def", "Version 2.0 (the \"License\"); you may # not use this", "specific language governing permissions and limitations # under the License.", "from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp", "subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port =", "= mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver))", "data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(", "Copyright (c) 2017 OpenStack Foundation # All Rights Reserved. #", "# not use this file except in compliance with the", "not use this file except in compliance with the License.", "OF ANY KIND, either express or implied. See the #", "= [] ports = {'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal(", "True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port", "writing, software # distributed under the License is distributed on", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "permissions and limitations # under the License. import testscenarios from", "governing permissions and limitations # under the License. 
import testscenarios", "mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def", "cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service)", "(c) 2017 OpenStack Foundation # All Rights Reserved. # #", "in compliance with the License. You may obtain # a", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "License for the specific language governing permissions and limitations #", "mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg load_tests", "self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] plugin = data['plugin']", "networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios", "the License. You may obtain # a copy of the", "and limitations # under the License. import testscenarios from networking_odl.common", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "use this file except in compliance with the License. 
You", "plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id'])", "You may obtain # a copy of the License at", "port = plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports = {'port':", "port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id =", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed", "as odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2", "def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data", "self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port =", "data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips'] =", "data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True)", "cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True,", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg", 
"port['fixed_ips'] = [] ports = {'port': port} plugin.update_port(data['context'], port_id, ports)", "Rights Reserved. # # Licensed under the Apache License, Version", "self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver,", "def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context =", "either express or implied. See the # License for the", "[] ports = {'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context,", "networking_odl.common import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver from", "under the License is distributed on an \"AS IS\" BASIS,", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "may # not use this file except in compliance with", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2", "port_id) port['fixed_ips'] = [] ports = {'port': port} plugin.update_port(data['context'], port_id,", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "with the License. You may obtain # a copy of", "KIND, either express or implied. See the # License for", "# License for the specific language governing permissions and limitations", "Reserved. 
# # Licensed under the Apache License, Version 2.0", "= data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'],", "plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports = {'port': port} plugin.update_port(data['context'],", "OpenStack Foundation # All Rights Reserved. # # Licensed under", "def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver()", "you may # not use this file except in compliance", "from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config", "\"License\"); you may # not use this file except in", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech =", "test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,", "'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl')", "express or implied. See the # License for the specific", "Foundation # All Rights Reserved. # # Licensed under the", "this file except in compliance with the License. 
You may", "= self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data =", "{'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port)", "import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def", "compliance with the License. You may obtain # a copy", "the Apache License, Version 2.0 (the \"License\"); you may #", "ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'],", "= testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp()", "data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips'] = [] ports", "self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self):", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import", "data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'],", "# WARRANTIES OR 
CONDITIONS OF ANY KIND, either express or", "def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True,", "limitations # under the License. import testscenarios from networking_odl.common import", "test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context']", "See the # License for the specific language governing permissions", "self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24',", "plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "the # License for the specific language governing permissions and", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "# # Unless required by applicable law or agreed to", "2017 OpenStack Foundation # All Rights Reserved. # # Licensed", "def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context =", "= self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context,", "the License. 
import testscenarios from networking_odl.common import constants as odl_const", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "file except in compliance with the License. You may obtain", "= {'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE,", "# Copyright (c) 2017 OpenStack Foundation # All Rights Reserved.", "= self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] plugin =", "from networking_odl.common import constants as odl_const from networking_odl.dhcp import odl_dhcp_driver", "True, True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE)", "for the specific language governing permissions and limitations # under", "law or agreed to in writing, software # distributed under", "data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] plugin", "OR CONDITIONS OF ANY KIND, either express or implied. 
See", "the specific language governing permissions and limitations # under the", "self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET,", "subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'],", "constants as odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import", "import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base", "test_odl_dhcp_driver_base from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config')", "= data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id)", "from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class", "# under the License. import testscenarios from networking_odl.common import constants", "under the Apache License, Version 2.0 (the \"License\"); you may", "except in compliance with the License. You may obtain #", "2.0 (the \"License\"); you may # not use this file", "odl_const from networking_odl.dhcp import odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from", "implied. 
See the # License for the specific language governing", "import test_odl_dhcp_driver_base from oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl',", "odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries() port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id'])", "from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from oslo_config import cfg load_tests =", "data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port", "odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def", "mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'],", "self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self):", "language governing permissions and limitations # under the License. import", "License. 
You may obtain # a copy of the License", "import testscenarios from networking_odl.common import constants as odl_const from networking_odl.dhcp", "self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'], port_id) port['fixed_ips']", "True) subnet_context = data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id =", "by applicable law or agreed to in writing, software #", "# distributed under the License is distributed on an \"AS", "ANY KIND, either express or implied. See the # License", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "True, True) subnet_context = data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id", "# Unless required by applicable law or agreed to in", "port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data", "port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port_id) port = plugin.get_port(data['context'],", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "to in writing, software # distributed under the License is", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "= data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'],", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): 
self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self):", "port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT, odl_const.ODL_UPDATE, port) self.mech.journal.sync_pending_entries()", "setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize()", "testscenarios from networking_odl.common import constants as odl_const from networking_odl.dhcp import", "True, 'ml2_odl') self.mech = mech_driver_v2.OpenDaylightMechanismDriver() self.mech.initialize() def test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def", "test_dhcp_port_create_on_subnet_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context = data['subnet_context']", "or agreed to in writing, software # distributed under the", "odl_dhcp_driver from networking_odl.ml2 import mech_driver_v2 from networking_odl.tests.unit.dhcp import test_odl_dhcp_driver_base from", "under the License. 
import testscenarios from networking_odl.common import constants as", "required by applicable law or agreed to in writing, software", "data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True,", "test_dhcp_flag_test(self): self.assertTrue(cfg.CONF.ml2_odl.enable_dhcp_service) def test_dhcp_driver_load(self): self.assertTrue(isinstance(self.mech.dhcp_driver, odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data =", "data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context) port_id = self.get_port_id(data['plugin'], data['context'], data['network_id'],", "oslo_config import cfg load_tests = testscenarios.load_tests_apply_scenarios cfg.CONF.import_group('ml2_odl', 'networking_odl.common.config') class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase):", "data['network_id'], data['subnet_id']) self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True,", "class OdlDhcpDriverTestCase(test_odl_dhcp_driver_base.OdlDhcpDriverTestBase): def setUp(self): super(OdlDhcpDriverTestCase, self).setUp() cfg.CONF.set_override('enable_dhcp_service', True, 'ml2_odl') self.mech", "self.assertIsNotNone(port) def test_dhcp_delete_on_port_update_event(self): data = self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context", "True, True, True) subnet_context = data['subnet_context'] plugin = data['plugin'] self.mech.dhcp_driver.create_or_delete_dhcp_port(subnet_context)", "ports = {'port': port} plugin.update_port(data['context'], port_id, ports) mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_PORT,", "odl_dhcp_driver.OdlDhcpDriver)) def test_dhcp_port_create_on_subnet_event(self): data = 
self.get_network_and_subnet_context('10.0.50.0/24', True, True, True) subnet_context", "True, True) subnet_context = data['subnet_context'] mech_driver_v2.OpenDaylightMechanismDriver._record_in_journal( subnet_context, odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries()", "or implied. See the # License for the specific language", "Apache License, Version 2.0 (the \"License\"); you may # not", "odl_const.ODL_SUBNET, odl_const.ODL_CREATE) self.mech.journal.sync_pending_entries() port = self.get_port_id(data['plugin'], data['context'], data['network_id'], data['subnet_id']) self.assertIsNotNone(port)" ]
[ "] operations = [ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True, default='<KEY>', max_length=32,", "on 2019-11-13 13:52 from django.db import migrations, models class Migration(migrations.Migration):", "Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations = [", "13:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies =", "django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ('users',", "[ ('users', '0001_initial'), ] operations = [ migrations.AlterField( model_name='users', name='site_key',", "# Generated by Django 2.2.2 on 2019-11-13 13:52 from django.db", "2.2.2 on 2019-11-13 13:52 from django.db import migrations, models class", "migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ]", "[ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True, default='<KEY>', max_length=32, unique=True), ), ]", "2019-11-13 13:52 from django.db import migrations, models class Migration(migrations.Migration): dependencies", "Django 2.2.2 on 2019-11-13 13:52 from django.db import migrations, models", "('users', '0001_initial'), ] operations = [ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True,", "'0001_initial'), ] operations = [ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True, default='<KEY>',", "import migrations, models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'),", "from django.db import migrations, models class Migration(migrations.Migration): dependencies = [", "operations = [ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True, default='<KEY>', max_length=32, unique=True),", "by Django 2.2.2 on 2019-11-13 13:52 from django.db import migrations,", 
"Generated by Django 2.2.2 on 2019-11-13 13:52 from django.db import", "models class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations", "= [ ('users', '0001_initial'), ] operations = [ migrations.AlterField( model_name='users',", "class Migration(migrations.Migration): dependencies = [ ('users', '0001_initial'), ] operations =", "= [ migrations.AlterField( model_name='users', name='site_key', field=models.CharField(blank=True, default='<KEY>', max_length=32, unique=True), ),", "dependencies = [ ('users', '0001_initial'), ] operations = [ migrations.AlterField(" ]
[ "\"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day,", "timedelta(days=30), }, include_previous=True, ) active_users = handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24),", "24 hours, 7 days and\" \" 30 days. The `previous_`", "\"\"\" handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count()", "signed up from 48 to 24 hours ago. It can", "datetime import timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils import", "days. The `previous_` values are the values of the period", "to 24 hours ago. It can be used to calculate", "from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from baserow.api.decorators", "for the last 24 hours, 7 days and\" \" 30", "last 30 days is also included.\\n\\nThis is a **premium** feature.\",", "example `previous_new_users_last_24_hours` are the new users that signed up from", "\" \"example `previous_new_users_last_24_hours` are the new users that signed up", "an increase or decrease in the amount of signups. 
A", "\"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) active_users = handler.get_active_user_count(", "\"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day(", "calculate an increase or decrease \" \"in the amount of", "handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications", "and active users for the last 24 hours, 7 days", "day \" \"for the last 30 days is also included.\\n\\nThis", "the last 30 days is also included. \"\"\" handler =", "from drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.permissions", "AdminDashboardHandler from .serializers import AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView):", "extend_schema from rest_framework.response import Response from rest_framework.permissions import IsAdminUser from", "}, include_previous=True, ) active_users = handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\":", "}, ) @accept_timezone() def get(self, request, now): \"\"\" Returns the", "\"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users, **active_users,", "get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns", "the values of the period before, so for \" \"example", "users that signed up from 48 to 24 hours ago.", "from rest_framework.views import APIView from baserow.api.decorators import accept_timezone from baserow.core.models", "\"new_users_per_day\": 
new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users, **active_users, } ) return Response(serializer.data)", "@accept_timezone() def get(self, request, now): \"\"\" Returns the new and", "Group.objects.all().count() total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24),", "values of the period before, so for example `previous_new_users_last_24_hours` are", "from rest_framework.response import Response from rest_framework.permissions import IsAdminUser from rest_framework.views", "calculate an increase or decrease in the amount of signups.", "@extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new and active users for", "24 hours ago. It can be used to calculate an", "= handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), },", "users for every day \" \"for the last 30 days", "The `previous_` values are the values of the period before,", "users that signed up \" \"from 48 to 24 hours", "of the period before, so for example `previous_new_users_last_24_hours` are the", "increase or decrease in the amount of signups. A list", "the amount of signups. A list of the new and", "timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) active_users =", "before, so for example `previous_new_users_last_24_hours` are the new users that", "the period before, so for example `previous_new_users_last_24_hours` are the new", "= Group.objects.all().count() total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\":", "\"from 48 to 24 hours ago. 
It can be used", "def get(self, request, now): \"\"\" Returns the new and active", "last 24 hours, 7 days and 30 days. The `previous_`", "timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30),", "or decrease in the amount of signups. A list of", "so for example `previous_new_users_last_24_hours` are the new users that signed", "active users for every day for the last 30 days", "<reponame>cjh0613/baserow from datetime import timedelta from django.contrib.auth import get_user_model from", "timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) active_users = handler.get_active_user_count( {", "can be used to calculate an increase or decrease \"", "is also included. \"\"\" handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count()", "hours, 7 days and\" \" 30 days. The `previous_` values", "also included.\\n\\nThis is a **premium** feature.\", responses={ 200: AdminDashboardSerializer, 401:", "users for the last 24 hours, 7 days and\" \"", "are the values of the period before, so for example", "days and 30 days. The `previous_` values are the values", "30 days is also included. 
\"\"\" handler = AdminDashboardHandler() total_users", "the new users that signed up from 48 to 24", "timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) new_users_per_day =", "total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users, **active_users, }", "and active users for every day for the last 30", "`previous_new_users_last_24_hours` are the new users that signed up \" \"from", "import IsAdminUser from rest_framework.views import APIView from baserow.api.decorators import accept_timezone", "in the amount of signups. A list of the new", "rest_framework.response import Response from rest_framework.permissions import IsAdminUser from rest_framework.views import", "of the period before, so for \" \"example `previous_new_users_last_24_hours` are", "\"for the last 30 days is also included.\\n\\nThis is a", "a **premium** feature.\", responses={ 200: AdminDashboardSerializer, 401: None, }, )", "request, now): \"\"\" Returns the new and active users for", "A list of the new and active users for every", ".serializers import AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView): permission_classes =", "of the new and active users for every day \"", "for example `previous_new_users_last_24_hours` are the new users that signed up", "up from 48 to 24 hours ago. It can be", "period before, so for \" \"example `previous_new_users_last_24_hours` are the new", "and active users for every day \" \"for the last", "the last 24 hours, 7 days and 30 days. 
The", "new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30),", "total_users, \"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users,", "\" \"for the last 30 days is also included.\\n\\nThis is", "AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day,", "= get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\",", "days is also included.\\n\\nThis is a **premium** feature.\", responses={ 200:", "new users that signed up from 48 to 24 hours", "included. \"\"\" handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups =", "= handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now", "from datetime import timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils", ") active_users = handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\":", "}, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day", "description=\"Returns the new and active users for the last 24", "from .serializers import AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView): permission_classes", "rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from baserow.api.decorators import", "the new and active users for every 
day \" \"for", "new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30),", "get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response import Response from", "values of the period before, so for \" \"example `previous_new_users_last_24_hours`", "used to calculate an increase or decrease in the amount", "feature.\", responses={ 200: AdminDashboardSerializer, 401: None, }, ) @accept_timezone() def", "users for every day for the last 30 days is", "accept_timezone from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler", "of signups. A list of the new and active users", "APIView from baserow.api.decorators import accept_timezone from baserow.core.models import Group, Application", "Response from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView from", "and 30 days. The `previous_` values are the values of", "also included. 
\"\"\" handler = AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups", "are the new users that signed up \" \"from 48", "new and active users for every day \" \"for the", "active users for the last 24 hours, 7 days and", "the values of the period before, so for example `previous_new_users_last_24_hours`", "the new and active users for the last 24 hours,", "active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer = AdminDashboardSerializer( {", "import AdminDashboardHandler from .serializers import AdminDashboardSerializer User = get_user_model() class", "now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer =", "from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer User =", "\"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) active_users", "active_users = handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30),", "amount of signups. A list of the new and active", "= handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), },", "handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now )", "last 24 hours, 7 days and\" \" 30 days. The", "signups. 
A list of the new and active users for", "active users for every day \" \"for the last 30", "AdminDashboardSerializer, 401: None, }, ) @accept_timezone() def get(self, request, now):", "values are the values of the period before, so for", "are the new users that signed up from 48 to", "import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import", "to calculate an increase or decrease \" \"in the amount", "be used to calculate an increase or decrease in the", "for the last 30 days is also included. \"\"\" handler", "\"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) active_users = handler.get_active_user_count( { \"active_users_last_24_hours\":", "200: AdminDashboardSerializer, 401: None, }, ) @accept_timezone() def get(self, request,", "period before, so for example `previous_new_users_last_24_hours` are the new users", "now=now ) serializer = AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\": total_groups,", "User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts(", "\" 30 days. The `previous_` values are the values of", "active users for the last 24 hours, 7 days and\"", "\" \"from 48 to 24 hours ago. 
It can be", "total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users, **active_users, } ) return", ") @accept_timezone() def get(self, request, now): \"\"\" Returns the new", "of the new and active users for every day for", "`previous_new_users_last_24_hours` are the new users that signed up from 48", "{ \"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\":", "rest_framework.views import APIView from baserow.api.decorators import accept_timezone from baserow.core.models import", "`previous_` values are the values of the period before, so", "are the values of the period before, so for \"", "import accept_timezone from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import", "decrease \" \"in the amount of signups. A list of", "last 30 days is also included. \"\"\" handler = AdminDashboardHandler()", "AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications = Application.objects.all().count()", "timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer", "= handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer = AdminDashboardSerializer( { \"total_users\":", "import get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response import Response", "users for the last 24 hours, 7 days and 30", "baserow.api.decorators import accept_timezone from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler", "User = get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"],", "every day \" \"for the last 30 days is also", "None, }, ) 
@accept_timezone() def get(self, request, now): \"\"\" Returns", "for every day for the last 30 days is also", "It can be used to calculate an increase or decrease", "AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema(", "Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer User", "total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\":", "for every day \" \"for the last 30 days is", "new users that signed up \" \"from 48 to 24", "\"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) new_users_per_day", "decrease in the amount of signups. A list of the", "permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new and", "7 days and\" \" 30 days. 
The `previous_` values are", "handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True,", "handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer = AdminDashboardSerializer( { \"total_users\": total_users,", "IsAdminUser from rest_framework.views import APIView from baserow.api.decorators import accept_timezone from", "baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer User = get_user_model()", "that signed up \" \"from 48 to 24 hours ago.", "**premium** feature.\", responses={ 200: AdminDashboardSerializer, 401: None, }, ) @accept_timezone()", "handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True,", "import APIView from baserow.api.decorators import accept_timezone from baserow.core.models import Group,", "for the last 24 hours, 7 days and 30 days.", "and\" \" 30 days. 
The `previous_` values are the values", ") new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day = handler.get_active_user_count_per_day(", "include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now ) active_users_per_day =", "Returns the new and active users for the last 24", "= AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\": total_applications, \"new_users_per_day\":", "so for \" \"example `previous_new_users_last_24_hours` are the new users that", "= User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications = Application.objects.all().count() new_users =", "timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema from", "total_groups = Group.objects.all().count() total_applications = Application.objects.all().count() new_users = handler.get_new_user_counts( {", "Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers import AdminDashboardSerializer", "class AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the", "include_previous=True, ) active_users = handler.get_active_user_count( { \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7),", "\" \"in the amount of signups. 
A list of the", "tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new and active users for the", "now): \"\"\" Returns the new and active users for the", "the last 24 hours, 7 days and\" \" 30 days.", "get(self, request, now): \"\"\" Returns the new and active users", "{ \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\": timedelta(days=30), }, include_previous=True, )", "can be used to calculate an increase or decrease in", "401: None, }, ) @accept_timezone() def get(self, request, now): \"\"\"", "the last 30 days is also included.\\n\\nThis is a **premium**", "up \" \"from 48 to 24 hours ago. It can", "day for the last 30 days is also included. \"\"\"", "drf_spectacular.utils import extend_schema from rest_framework.response import Response from rest_framework.permissions import", "import AdminDashboardSerializer User = get_user_model() class AdminDashboardView(APIView): permission_classes = (IsAdminUser,)", "= (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new and active", "signed up \" \"from 48 to 24 hours ago. It", "7 days and 30 days. The `previous_` values are the", "is also included.\\n\\nThis is a **premium** feature.\", responses={ 200: AdminDashboardSerializer,", "24 hours, 7 days and 30 days. The `previous_` values", "\"example `previous_new_users_last_24_hours` are the new users that signed up \"", "list of the new and active users for every day", "from baserow.api.decorators import accept_timezone from baserow.core.models import Group, Application from", "before, so for \" \"example `previous_new_users_last_24_hours` are the new users", "an increase or decrease \" \"in the amount of signups.", "increase or decrease \" \"in the amount of signups. 
A", "AdminDashboardView(APIView): permission_classes = (IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new", "import timedelta from django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema", "the new users that signed up \" \"from 48 to", "included.\\n\\nThis is a **premium** feature.\", responses={ 200: AdminDashboardSerializer, 401: None,", "from 48 to 24 hours ago. It can be used", "baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from .serializers", "for \" \"example `previous_new_users_last_24_hours` are the new users that signed", "that signed up from 48 to 24 hours ago. It", "timedelta(days=30), }, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now )", "hours ago. It can be used to calculate an increase", "the period before, so for \" \"example `previous_new_users_last_24_hours` are the", "or decrease \" \"in the amount of signups. A list", "responses={ 200: AdminDashboardSerializer, 401: None, }, ) @accept_timezone() def get(self,", "from django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response", "30 days is also included.\\n\\nThis is a **premium** feature.\", responses={", "django.contrib.auth import get_user_model from drf_spectacular.utils import extend_schema from rest_framework.response import", "\"in the amount of signups. A list of the new", "days and\" \" 30 days. The `previous_` values are the", "to calculate an increase or decrease in the amount of", "every day for the last 30 days is also included.", "30 days. 
The `previous_` values are the values of the", "\"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, ) new_users_per_day = handler.get_new_user_count_per_day( timedelta(days=30), now=now", ") serializer = AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\":", "operation_id=\"admin_dashboard\", description=\"Returns the new and active users for the last", "be used to calculate an increase or decrease \" \"in", "\"total_applications\": total_applications, \"new_users_per_day\": new_users_per_day, \"active_users_per_day\": active_users_per_day, **new_users, **active_users, } )", "serializer = AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\": total_groups, \"total_applications\": total_applications,", "is a **premium** feature.\", responses={ 200: AdminDashboardSerializer, 401: None, },", "hours, 7 days and 30 days. The `previous_` values are", ") active_users_per_day = handler.get_active_user_count_per_day( timedelta(days=30), now=now ) serializer = AdminDashboardSerializer(", "import extend_schema from rest_framework.response import Response from rest_framework.permissions import IsAdminUser", "(IsAdminUser,) @extend_schema( tags=[\"Admin\"], operation_id=\"admin_dashboard\", description=\"Returns the new and active users", "days is also included. \"\"\" handler = AdminDashboardHandler() total_users =", "= AdminDashboardHandler() total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications =", "total_users = User.objects.filter(is_active=True).count() total_groups = Group.objects.all().count() total_applications = Application.objects.all().count() new_users", "48 to 24 hours ago. 
It can be used to", "import Response from rest_framework.permissions import IsAdminUser from rest_framework.views import APIView", "the new and active users for every day for the", "timedelta(days=30), now=now ) serializer = AdminDashboardSerializer( { \"total_users\": total_users, \"total_groups\":", "\"\"\" Returns the new and active users for the last", "new and active users for every day for the last", "Application.objects.all().count() new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7), \"new_users_last_30_days\":", "used to calculate an increase or decrease \" \"in the", "= Application.objects.all().count() new_users = handler.get_new_user_counts( { \"new_users_last_24_hours\": timedelta(hours=24), \"new_users_last_7_days\": timedelta(days=7),", "new and active users for the last 24 hours, 7", "{ \"active_users_last_24_hours\": timedelta(hours=24), \"active_users_last_7_days\": timedelta(days=7), \"active_users_last_30_days\": timedelta(days=30), }, include_previous=True, )", "ago. It can be used to calculate an increase or", "from baserow.core.models import Group, Application from baserow_premium.admin.dashboard.handler import AdminDashboardHandler from" ]
[ "while True: data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob(): while", "def socketRecv(): while True: data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def", "to queue\") def socketRecv(): while True: data = s.recv(1024).decode(\"utf-8\") print(data)", "except\") # s.close() # 關閉連線 socketThread.do_run = False # socketThread.join()", "UTF-8 -*- import sys import socket import time import threading", "time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start() # inputThread = Thread(target=inputJob) #", "print(data) time.sleep(0.1) def inputJob(): while True: data = input() s.send(bytes(data,", "\"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start() # inputThread = Thread(target=inputJob)", "socketRecv(): while True: data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob():", "= socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add client to queue\")", "# 關閉連線 socketThread.do_run = False # socketThread.join() # inputThread.join() print(\"close", "True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add client to queue\") def", "s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add client to", "s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob(): while True: data = input()", "= '192.168.11.98' PORT = int(sys.argv[1]) queue = [] s =", "time.sleep(0.1) def inputJob(): while True: data = input() s.send(bytes(data, \"utf-8\"))", "inputJob(): while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread", "s.connect((HOST, PORT)) queue.append(s) print(\"add client to queue\") def socketRecv(): while", "socketThread.do_run = False # 
socketThread.join() # inputThread.join() print(\"close thread\") sys.exit(0)", "data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or EOFError:", "# inputThread.start() try: while True: data = input() s.send(bytes(data, \"utf-8\"))", "input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start() # inputThread", "# s.close() # 關閉連線 socketThread.do_run = False # socketThread.join() #", "queue = [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s)", "EOFError: print(\"in except\") # s.close() # 關閉連線 socketThread.do_run = False", "inputThread = Thread(target=inputJob) # inputThread.start() try: while True: data =", "data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start()", "Thread(target=inputJob) # inputThread.start() try: while True: data = input() s.send(bytes(data,", "queue.append(s) print(\"add client to queue\") def socketRecv(): while True: data", "socketThread = threading.Thread(target=socketRecv) socketThread.start() # inputThread = Thread(target=inputJob) # inputThread.start()", "PORT = int(sys.argv[1]) queue = [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "# inputThread = Thread(target=inputJob) # inputThread.start() try: while True: data", "def inputJob(): while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1)", "= int(sys.argv[1]) queue = [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST,", "= input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or EOFError: print(\"in", "input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or EOFError: print(\"in except\")", "s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start() # inputThread =", "time.sleep(0.1) except 
KeyboardInterrupt or EOFError: print(\"in except\") # s.close() #", "threading.Thread(target=socketRecv) socketThread.start() # inputThread = Thread(target=inputJob) # inputThread.start() try: while", "HOST = '192.168.11.98' PORT = int(sys.argv[1]) queue = [] s", "import select HOST = '192.168.11.98' PORT = int(sys.argv[1]) queue =", "while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt", "socket import time import threading import select HOST = '192.168.11.98'", "data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob(): while True: data", "[] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add client", "while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread =", "= threading.Thread(target=socketRecv) socketThread.start() # inputThread = Thread(target=inputJob) # inputThread.start() try:", "queue\") def socketRecv(): while True: data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1)", "except KeyboardInterrupt or EOFError: print(\"in except\") # s.close() # 關閉連線", "import socket import time import threading import select HOST =", "'192.168.11.98' PORT = int(sys.argv[1]) queue = [] s = socket.socket(socket.AF_INET,", "-*- coding: UTF-8 -*- import sys import socket import time", "select HOST = '192.168.11.98' PORT = int(sys.argv[1]) queue = []", "import threading import select HOST = '192.168.11.98' PORT = int(sys.argv[1])", "PORT)) queue.append(s) print(\"add client to queue\") def socketRecv(): while True:", "s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or EOFError: print(\"in except\") #", "int(sys.argv[1]) queue = [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT))", "= s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob(): while True: data =", "KeyboardInterrupt or EOFError: print(\"in except\") # s.close() # 關閉連線 
socketThread.do_run", "inputThread.start() try: while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1)", "\"utf-8\")) time.sleep(0.1) except KeyboardInterrupt or EOFError: print(\"in except\") # s.close()", "-*- import sys import socket import time import threading import", "or EOFError: print(\"in except\") # s.close() # 關閉連線 socketThread.do_run =", "s.close() # 關閉連線 socketThread.do_run = False # socketThread.join() # inputThread.join()", "True: data = s.recv(1024).decode(\"utf-8\") print(data) time.sleep(0.1) def inputJob(): while True:", "try: while True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) except", "socketThread.start() # inputThread = Thread(target=inputJob) # inputThread.start() try: while True:", "= input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv) socketThread.start() #", "True: data = input() s.send(bytes(data, \"utf-8\")) time.sleep(0.1) socketThread = threading.Thread(target=socketRecv)", "threading import select HOST = '192.168.11.98' PORT = int(sys.argv[1]) queue", "import sys import socket import time import threading import select", "time import threading import select HOST = '192.168.11.98' PORT =", "print(\"add client to queue\") def socketRecv(): while True: data =", "coding: UTF-8 -*- import sys import socket import time import", "client to queue\") def socketRecv(): while True: data = s.recv(1024).decode(\"utf-8\")", "# -*- coding: UTF-8 -*- import sys import socket import", "print(\"in except\") # s.close() # 關閉連線 socketThread.do_run = False #", "關閉連線 socketThread.do_run = False # socketThread.join() # inputThread.join() print(\"close thread\")", "sys import socket import time import threading import select HOST", "= [] s = socket.socket(socket.AF_INET, socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add", "socket.SOCK_STREAM) s.connect((HOST, PORT)) queue.append(s) print(\"add client to queue\") def socketRecv():", "import 
time import threading import select HOST = '192.168.11.98' PORT", "= Thread(target=inputJob) # inputThread.start() try: while True: data = input()" ]
[ "trusted circles, enter the list of comma-separated IDs e.g [1,2,3]\",", "\"Indicator type to assign if a specific type is not", "} }, \"required\": [ \"classification\" ] } } } \"\"\")", "{ \"type\": \"string\", \"title\": \"Email Mapping\", \"description\": \"Indicator type to", "with an observable\", \"order\": 11 }, \"notes\": { \"type\": \"array\",", "\"string\", \"title\": \"Domain Mapping\", \"description\": \"Indicator type to assign if", "\"bytes\" }, \"filename\": { \"type\": \"string\", \"title\": \"Filename\", \"description\": \"Name", "\"title\": \"Confidence\", \"description\": \"Confidence value assigned to the observable. Confidence", "ID\", \"description\": \"ID for import session\", \"order\": 3 }, \"job_id\":", "associated with an observable\", \"order\": 11 }, \"notes\": { \"type\":", "not associated with an observable\", \"order\": 11 }, \"notes\": {", "ratio between the amount of the source confidence of each", "needs approval\", \"order\": 2 } }, \"required\": [ \"file\" ],", "ID\", \"description\": \"Job ID\", \"order\": 1 }, \"success\": { \"type\":", "import the threat data to multiple trusted circles, enter the", "\"title\": \"Threat Type\", \"description\": \"Type of threat associated with the", "= json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"results\":", "\"File\", \"description\": \"File of data to be imported into Anomali", "ImportObservableInput(komand.Input): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\":", "\"Settings needed for importing an observable that needs approval\", \"order\":", "Confidence Weight\", \"description\": \"Specifies the ratio between the amount of", "# GENERATED BY KOMAND SDK - DO NOT EDIT import", "\"description\": \"Type of threat associated with the imported observables\", \"order\":", "with an observable\", \"order\": 9 } }, \"required\": [ \"classification\"", "for import session\", \"order\": 3 }, \"job_id\": { 
\"type\": \"string\",", "a specific type is not associated with an observable\", \"order\":", "specific type is not associated with an observable\", \"order\": 11", "specific type is not associated with an observable\", \"order\": 7", "the source confidence of each observable and the ThreatStream confidence\",", "\"title\": \"Domain Mapping\", \"description\": \"Indicator type to assign if a", "\"file\" ], \"definitions\": { \"file\": { \"id\": \"file\", \"type\": \"object\",", "confidence\", \"order\": 1 }, \"domain_mapping\": { \"type\": \"string\", \"title\": \"Domain", "\"string\", \"title\": \"Filename\", \"description\": \"Name of file\" } } },", "\"properties\": { \"import_session_id\": { \"type\": \"string\", \"title\": \"Import Session ID\",", "\"title\": \"Job ID\", \"description\": \"Job ID\", \"order\": 1 }, \"success\":", "BY KOMAND SDK - DO NOT EDIT import komand import", "- DO NOT EDIT import komand import json class Component:", "\"title\": \"File\", \"description\": \"File Object\", \"properties\": { \"content\": { \"type\":", "should be imported. 
If you want to import the threat", "of data to be imported into Anomali ThreatStream\", \"order\": 1", "\"type\": \"string\", \"title\": \"MD5 Mapping\", \"description\": \"Indicator type to assign", "successful\", \"order\": 2 } } } } } \"\"\") def", "\"type\": \"array\", \"title\": \"Trusted Circles\", \"description\": \"ID of the trusted", "\"order\": 12 }, \"url_mapping\": { \"type\": \"string\", \"title\": \"URL Mapping\",", "{ \"type\": \"string\", \"title\": \"Domain Mapping\", \"description\": \"Indicator type to", "UI e.g ['note1', 'note2', 'note3']\", \"items\": { \"type\": \"string\" },", "9 } }, \"required\": [ \"classification\" ] } } }", "the ratio between the amount of the source confidence of", "confidence\", \"order\": 2 }, \"threat_type\": { \"type\": \"string\", \"title\": \"Threat", "source confidence of each observable and the ThreatStream confidence\", \"order\":", "}, \"order\": 12 }, \"url_mapping\": { \"type\": \"string\", \"title\": \"URL", "Confidence score can range from 0-100, in increasing order of", "range from 0-100, in increasing order of confidence\", \"order\": 1", "each observable and the ThreatStream confidence\", \"order\": 2 }, \"threat_type\":", "imported into Anomali ThreatStream\", \"order\": 1 }, \"observable_settings\": { \"$ref\":", "\"title\": \"Results\", \"description\": \"Results from importing observable(s)\", \"order\": 1 }", "type is not associated with an observable\", \"order\": 10 },", "\"object\", \"title\": \"File\", \"description\": \"File Object\", \"properties\": { \"content\": {", "needed for importing an observable that needs approval\", \"order\": 2", "\"object\", \"title\": \"Variables\", \"properties\": { \"file\": { \"$ref\": \"#/definitions/file\", \"title\":", "\"object\", \"title\": \"observable_settings\", \"properties\": { \"classification\": { \"type\": \"string\", \"title\":", "\"title\": \"URL Mapping\", \"description\": \"Indicator type to assign if a", "\"description\": \"File contents\", 
\"format\": \"bytes\" }, \"filename\": { \"type\": \"string\",", "the observable when it is imported\", \"default\": \"\", \"enum\": [", "[ \"file\" ], \"definitions\": { \"file\": { \"id\": \"file\", \"type\":", "\"File\", \"description\": \"File Object\", \"properties\": { \"content\": { \"type\": \"string\",", "\"string\", \"title\": \"Job ID\", \"description\": \"Job ID\", \"order\": 1 },", "\"order\": 4 }, \"confidence\": { \"type\": \"integer\", \"title\": \"Confidence\", \"description\":", "which this threat data should be imported. If you want", "\"title\": \"Success\", \"description\": \"If import was successful\", \"order\": 2 }", "\"description\": \"Settings needed for importing an observable that needs approval\",", "\"success\": { \"type\": \"boolean\", \"title\": \"Success\", \"description\": \"If import was", "{ \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"file\": { \"$ref\":", "\"type\": \"object\", \"title\": \"File\", \"description\": \"File Object\", \"properties\": { \"content\":", "information is displayed in the Tags column of the ThreatStream", "from importing observable(s)\", \"order\": 1 } }, \"definitions\": { \"import_observable_response\":", "\"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results from importing observable(s)\", \"order\":", "in the Tags column of the ThreatStream UI e.g ['note1',", "RESULTS = \"results\" class ImportObservableInput(komand.Input): schema = json.loads(\"\"\" { \"type\":", "\"title\": \"Trusted Circles\", \"description\": \"ID of the trusted circle to", "ThreatStream UI e.g ['note1', 'note2', 'note3']\", \"items\": { \"type\": \"string\"", "\"required\": [ \"classification\" ] } } } \"\"\") def __init__(self):", "the amount of the source confidence of each observable and", "to assign to the observable when it is imported\", \"default\":", "}, \"url_mapping\": { \"type\": \"string\", \"title\": \"URL Mapping\", \"description\": 
\"Indicator", "\"required\": [ \"file\" ], \"definitions\": { \"file\": { \"id\": \"file\",", "\"File contents\", \"format\": \"bytes\" }, \"filename\": { \"type\": \"string\", \"title\":", "can range from 0-100, in increasing order of confidence\", \"order\":", "{ \"type\": \"boolean\", \"title\": \"Success\", \"description\": \"If import was successful\",", "to which this threat data should be imported. If you", "0-100, in increasing order of confidence\", \"order\": 1 }, \"domain_mapping\":", "}, \"required\": [ \"file\" ], \"definitions\": { \"file\": { \"id\":", "\"displayType\": \"date\", \"description\": \"Time stamp of when intelligence will expire", "\"description\": \"Results from importing observable(s)\", \"order\": 1 } }, \"definitions\":", "threat data to multiple trusted circles, enter the list of", "trusted circle to which this threat data should be imported.", "\"title\": \"Variables\", \"properties\": { \"file\": { \"$ref\": \"#/definitions/file\", \"title\": \"File\",", "\"title\": \"import_observable_response\", \"properties\": { \"import_session_id\": { \"type\": \"string\", \"title\": \"Import", "ThreatStream confidence\", \"order\": 2 }, \"threat_type\": { \"type\": \"string\", \"title\":", "{ \"type\": \"object\", \"title\": \"import_observable_response\", \"properties\": { \"import_session_id\": { \"type\":", "\"title\": \"IP Mapping\", \"description\": \"Indicator type to assign if a", "confidence of each observable and the ThreatStream confidence\", \"order\": 2", "\"type\": \"string\", \"title\": \"Filename\", \"description\": \"Name of file\" } }", "class Output: RESULTS = \"results\" class ImportObservableInput(komand.Input): schema = json.loads(\"\"\"", "is not associated with an observable\", \"order\": 11 }, \"notes\":", "\"observable_settings\": { \"type\": \"object\", \"title\": \"observable_settings\", \"properties\": { \"classification\": {", "\"description\": \"ID of the trusted circle to which this threat", "[1,2,3]\", 
\"items\": { \"type\": \"integer\" }, \"order\": 12 }, \"url_mapping\":", "\"description\": \"Indicator type to assign if a specific type is", "} }, \"observable_settings\": { \"type\": \"object\", \"title\": \"observable_settings\", \"properties\": {", "with an observable\", \"order\": 10 }, \"expiration_ts\": { \"type\": \"string\",", "\"title\": \"Email Mapping\", \"description\": \"Indicator type to assign if a", "\"import_observable_response\", \"properties\": { \"import_session_id\": { \"type\": \"string\", \"title\": \"Import Session", "\"order\": 2 } } } } } \"\"\") def __init__(self):", "\"title\": \"Source Confidence Weight\", \"description\": \"Specifies the ratio between the", "= \"results\" class ImportObservableInput(komand.Input): schema = json.loads(\"\"\" { \"type\": \"object\",", "and the ThreatStream confidence\", \"order\": 2 }, \"threat_type\": { \"type\":", "\"confidence\": { \"type\": \"integer\", \"title\": \"Confidence\", \"description\": \"Confidence value assigned", "of each observable and the ThreatStream confidence\", \"order\": 2 },", "\"trustedcircles\": { \"type\": \"array\", \"title\": \"Trusted Circles\", \"description\": \"ID of", "type to assign if a specific type is not associated", "12 }, \"url_mapping\": { \"type\": \"string\", \"title\": \"URL Mapping\", \"description\":", "OBSERVABLE_SETTINGS = \"observable_settings\" class Output: RESULTS = \"results\" class ImportObservableInput(komand.Input):", "\"order\": 11 }, \"notes\": { \"type\": \"array\", \"title\": \"Notes\", \"description\":", "} \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema =", "\"public\", \"private\" ], \"order\": 4 }, \"confidence\": { \"type\": \"integer\",", "not associated with an observable\", \"order\": 7 }, \"md5_mapping\": {", "import json class Component: DESCRIPTION = \"Import observable(s) into Anomali", "amount of the source confidence of each observable and the", 
"value assigned to the observable. Confidence score can range from", "you want to import the threat data to multiple trusted", "to import the threat data to multiple trusted circles, enter", "\"Import Session ID\", \"description\": \"ID for import session\", \"order\": 3", "2 } }, \"required\": [ \"file\" ], \"definitions\": { \"file\":", "\"email_mapping\": { \"type\": \"string\", \"title\": \"Email Mapping\", \"description\": \"Indicator type", "\"type\": \"string\", \"title\": \"Import Session ID\", \"description\": \"ID for import", "\"string\", \"title\": \"Import Session ID\", \"description\": \"ID for import session\",", "Weight\", \"description\": \"Specifies the ratio between the amount of the", "}, \"observable_settings\": { \"type\": \"object\", \"title\": \"observable_settings\", \"properties\": { \"classification\":", "\"description\": \"Time stamp of when intelligence will expire on ThreatStream\",", "\"domain_mapping\": { \"type\": \"string\", \"title\": \"Domain Mapping\", \"description\": \"Indicator type", "displayed in the Tags column of the ThreatStream UI e.g", "on ThreatStream\", \"format\": \"date-time\", \"order\": 5 }, \"ip_mapping\": { \"type\":", "], \"order\": 3 }, \"source_confidence_weight\": { \"type\": \"integer\", \"title\": \"Source", "is not associated with an observable\", \"order\": 7 }, \"md5_mapping\":", "\"observable_settings\" class Output: RESULTS = \"results\" class ImportObservableInput(komand.Input): schema =", "Circles\", \"description\": \"ID of the trusted circle to which this", "import session\", \"order\": 3 }, \"job_id\": { \"type\": \"string\", \"title\":", "ThreatStream with approval\" class Input: FILE = \"file\" OBSERVABLE_SETTINGS =", "\"type\": \"string\", \"title\": \"Domain Mapping\", \"description\": \"Indicator type to assign", "{ \"type\": \"array\", \"title\": \"Notes\", \"description\": \"Additional details for the", "an observable\", \"order\": 10 }, \"expiration_ts\": { \"type\": \"string\", 
\"title\":", "}, \"md5_mapping\": { \"type\": \"string\", \"title\": \"MD5 Mapping\", \"description\": \"Indicator", "\"id\": \"file\", \"type\": \"object\", \"title\": \"File\", \"description\": \"File Object\", \"properties\":", "\"ID for import session\", \"order\": 3 }, \"job_id\": { \"type\":", "observable\", \"order\": 7 }, \"md5_mapping\": { \"type\": \"string\", \"title\": \"MD5", "\"threat_type\": { \"type\": \"string\", \"title\": \"Threat Type\", \"description\": \"Type of", "\"results\": { \"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results from importing", "Input: FILE = \"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class Output: RESULTS", "be imported. If you want to import the threat data", "\"results\" class ImportObservableInput(komand.Input): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\":", "\"properties\": { \"file\": { \"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\": \"File", "want to import the threat data to multiple trusted circles,", "the observable\", \"default\": \"private\", \"enum\": [ \"public\", \"private\" ], \"order\":", "imported\", \"default\": \"\", \"enum\": [ \"low\", \"medium\", \"high\", \"very-high\", \"\"", "\"content\": { \"type\": \"string\", \"title\": \"Content\", \"description\": \"File contents\", \"format\":", "\"order\": 3 }, \"source_confidence_weight\": { \"type\": \"integer\", \"title\": \"Source Confidence", "of the ThreatStream UI e.g ['note1', 'note2', 'note3']\", \"items\": {", "class Input: FILE = \"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class Output:", "\"Variables\", \"properties\": { \"results\": { \"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\":", "}, \"success\": { \"type\": \"boolean\", \"title\": \"Success\", \"description\": \"If import", "}, \"order\": 6 }, \"severity\": { \"type\": \"string\", \"title\": \"Severity\",", "observable and 
the ThreatStream confidence\", \"order\": 2 }, \"threat_type\": {", "\"import_observable_response\": { \"type\": \"object\", \"title\": \"import_observable_response\", \"properties\": { \"import_session_id\": {", "\"description\": \"Name of file\" } } }, \"observable_settings\": { \"type\":", "\"type\": \"integer\", \"title\": \"Source Confidence Weight\", \"description\": \"Specifies the ratio", "\"Success\", \"description\": \"If import was successful\", \"order\": 2 } }", "\"classification\": { \"type\": \"string\", \"title\": \"Classification\", \"description\": \"Classification of the", "import komand import json class Component: DESCRIPTION = \"Import observable(s)", "\"File of data to be imported into Anomali ThreatStream\", \"order\":", "\"order\": 2 } }, \"required\": [ \"file\" ], \"definitions\": {", "\"object\", \"title\": \"Variables\", \"properties\": { \"results\": { \"$ref\": \"#/definitions/import_observable_response\", \"title\":", "}, \"ip_mapping\": { \"type\": \"string\", \"title\": \"IP Mapping\", \"description\": \"Indicator", "from 0-100, in increasing order of confidence\", \"order\": 1 },", "self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\":", "\"file\", \"type\": \"object\", \"title\": \"File\", \"description\": \"File Object\", \"properties\": {", "Tags column of the ThreatStream UI e.g ['note1', 'note2', 'note3']\",", "}, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\": \"Settings", "{ \"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results from importing observable(s)\",", "an observable\", \"order\": 9 } }, \"required\": [ \"classification\" ]", "{ \"type\": \"string\", \"title\": \"Expiration Time Stamp\", \"displayType\": \"date\", \"description\":", "{ \"results\": { \"$ref\": 
\"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results from", "\"string\", \"title\": \"Content\", \"description\": \"File contents\", \"format\": \"bytes\" }, \"filename\":", "\"type\": \"boolean\", \"title\": \"Success\", \"description\": \"If import was successful\", \"order\":", "\"job_id\": { \"type\": \"string\", \"title\": \"Job ID\", \"description\": \"Job ID\",", "13 }, \"trustedcircles\": { \"type\": \"array\", \"title\": \"Trusted Circles\", \"description\":", "Session ID\", \"description\": \"ID for import session\", \"order\": 3 },", "\"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\"", "assign if a specific type is not associated with an", "observable\", \"order\": 8 }, \"email_mapping\": { \"type\": \"string\", \"title\": \"Email", "assign to the observable when it is imported\", \"default\": \"\",", "\"format\": \"bytes\" }, \"filename\": { \"type\": \"string\", \"title\": \"Filename\", \"description\":", "}, \"filename\": { \"type\": \"string\", \"title\": \"Filename\", \"description\": \"Name of", "observable(s) into Anomali ThreatStream with approval\" class Input: FILE =", "\"enum\": [ \"low\", \"medium\", \"high\", \"very-high\", \"\" ], \"order\": 3", "\"definitions\": { \"file\": { \"id\": \"file\", \"type\": \"object\", \"title\": \"File\",", "}, \"email_mapping\": { \"type\": \"string\", \"title\": \"Email Mapping\", \"description\": \"Indicator", "of threat associated with the imported observables\", \"order\": 13 },", "data should be imported. 
If you want to import the", "{ \"id\": \"file\", \"type\": \"object\", \"title\": \"File\", \"description\": \"File Object\",", "\"Time stamp of when intelligence will expire on ThreatStream\", \"format\":", "['note1', 'note2', 'note3']\", \"items\": { \"type\": \"string\" }, \"order\": 6", "an observable\", \"order\": 8 }, \"email_mapping\": { \"type\": \"string\", \"title\":", "for importing an observable that needs approval\", \"order\": 2 }", "observable. Confidence score can range from 0-100, in increasing order", "{ \"type\": \"integer\" }, \"order\": 12 }, \"url_mapping\": { \"type\":", "}, \"domain_mapping\": { \"type\": \"string\", \"title\": \"Domain Mapping\", \"description\": \"Indicator", "\"string\", \"title\": \"Expiration Time Stamp\", \"displayType\": \"date\", \"description\": \"Time stamp", "\"title\": \"Content\", \"description\": \"File contents\", \"format\": \"bytes\" }, \"filename\": {", "observables\", \"order\": 13 }, \"trustedcircles\": { \"type\": \"array\", \"title\": \"Trusted", "be imported into Anomali ThreatStream\", \"order\": 1 }, \"observable_settings\": {", "\"description\": \"File of data to be imported into Anomali ThreatStream\",", "circle to which this threat data should be imported. 
If", "'note2', 'note3']\", \"items\": { \"type\": \"string\" }, \"order\": 6 },", "\"string\" }, \"order\": 6 }, \"severity\": { \"type\": \"string\", \"title\":", "intelligence will expire on ThreatStream\", \"format\": \"date-time\", \"order\": 5 },", "\"MD5 Mapping\", \"description\": \"Indicator type to assign if a specific", "want to assign to the observable when it is imported\",", "3 }, \"job_id\": { \"type\": \"string\", \"title\": \"Job ID\", \"description\":", "\"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class Output: RESULTS = \"results\" class", "circles, enter the list of comma-separated IDs e.g [1,2,3]\", \"items\":", "Stamp\", \"displayType\": \"date\", \"description\": \"Time stamp of when intelligence will", "observable\", \"order\": 11 }, \"notes\": { \"type\": \"array\", \"title\": \"Notes\",", "with an observable\", \"order\": 8 }, \"email_mapping\": { \"type\": \"string\",", "\"enum\": [ \"public\", \"private\" ], \"order\": 4 }, \"confidence\": {", "of the observable\", \"default\": \"private\", \"enum\": [ \"public\", \"private\" ],", "the threat data to multiple trusted circles, enter the list", "\"type\": \"string\", \"title\": \"Severity\", \"description\": \"Severity you want to assign", "\"string\", \"title\": \"IP Mapping\", \"description\": \"Indicator type to assign if", "Settings\", \"description\": \"Settings needed for importing an observable that needs", "imported observables\", \"order\": 13 }, \"trustedcircles\": { \"type\": \"array\", \"title\":", "class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\",", "is displayed in the Tags column of the ThreatStream UI", "with the imported observables\", \"order\": 13 }, \"trustedcircles\": { \"type\":", "\"array\", \"title\": \"Trusted Circles\", \"description\": \"ID of the trusted circle", "{ \"type\": \"string\" }, \"order\": 6 }, \"severity\": { \"type\":", "an observable\", \"order\": 7 }, 
\"md5_mapping\": { \"type\": \"string\", \"title\":", "\"Import observable(s) into Anomali ThreatStream with approval\" class Input: FILE", "\"Type of threat associated with the imported observables\", \"order\": 13", "{ \"import_observable_response\": { \"type\": \"object\", \"title\": \"import_observable_response\", \"properties\": { \"import_session_id\":", "expire on ThreatStream\", \"format\": \"date-time\", \"order\": 5 }, \"ip_mapping\": {", "to assign if a specific type is not associated with", "} }, \"definitions\": { \"import_observable_response\": { \"type\": \"object\", \"title\": \"import_observable_response\",", "\"string\", \"title\": \"Email Mapping\", \"description\": \"Indicator type to assign if", "}, \"expiration_ts\": { \"type\": \"string\", \"title\": \"Expiration Time Stamp\", \"displayType\":", "\"Classification of the observable\", \"default\": \"private\", \"enum\": [ \"public\", \"private\"", "ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\":", "GENERATED BY KOMAND SDK - DO NOT EDIT import komand", "\"Notes\", \"description\": \"Additional details for the observable. 
This information is", "{ \"type\": \"array\", \"title\": \"Trusted Circles\", \"description\": \"ID of the", "the ThreatStream UI e.g ['note1', 'note2', 'note3']\", \"items\": { \"type\":", "\"order\": 5 }, \"ip_mapping\": { \"type\": \"string\", \"title\": \"IP Mapping\",", "type is not associated with an observable\", \"order\": 7 },", "\"Email Mapping\", \"description\": \"Indicator type to assign if a specific", "specific type is not associated with an observable\", \"order\": 9", "\"order\": 1 }, \"domain_mapping\": { \"type\": \"string\", \"title\": \"Domain Mapping\",", "\"title\": \"MD5 Mapping\", \"description\": \"Indicator type to assign if a", "[ \"low\", \"medium\", \"high\", \"very-high\", \"\" ], \"order\": 3 },", "{ \"type\": \"integer\", \"title\": \"Source Confidence Weight\", \"description\": \"Specifies the", "def __init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" {", "\"format\": \"date-time\", \"order\": 5 }, \"ip_mapping\": { \"type\": \"string\", \"title\":", "json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"results\": {", "EDIT import komand import json class Component: DESCRIPTION = \"Import", "this threat data should be imported. 
If you want to", "\"order\": 1 } }, \"definitions\": { \"import_observable_response\": { \"type\": \"object\",", "\"type\": \"string\", \"title\": \"Content\", \"description\": \"File contents\", \"format\": \"bytes\" },", "the trusted circle to which this threat data should be", "you want to assign to the observable when it is", "\"URL Mapping\", \"description\": \"Indicator type to assign if a specific", "= \"Import observable(s) into Anomali ThreatStream with approval\" class Input:", "with approval\" class Input: FILE = \"file\" OBSERVABLE_SETTINGS = \"observable_settings\"", "= json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"file\":", "\"definitions\": { \"import_observable_response\": { \"type\": \"object\", \"title\": \"import_observable_response\", \"properties\": {", "} } } } } \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema)", "not associated with an observable\", \"order\": 8 }, \"email_mapping\": {", "{ \"type\": \"string\", \"title\": \"Import Session ID\", \"description\": \"ID for", "\"title\": \"File\", \"description\": \"File of data to be imported into", "NOT EDIT import komand import json class Component: DESCRIPTION =", "\"order\": 1 }, \"success\": { \"type\": \"boolean\", \"title\": \"Success\", \"description\":", "\"very-high\", \"\" ], \"order\": 3 }, \"source_confidence_weight\": { \"type\": \"integer\",", "the list of comma-separated IDs e.g [1,2,3]\", \"items\": { \"type\":", "imported. 
If you want to import the threat data to", "\"Observable Settings\", \"description\": \"Settings needed for importing an observable that", "\"type\": \"object\", \"title\": \"import_observable_response\", \"properties\": { \"import_session_id\": { \"type\": \"string\",", "} }, \"required\": [ \"file\" ], \"definitions\": { \"file\": {", "Component: DESCRIPTION = \"Import observable(s) into Anomali ThreatStream with approval\"", "\"Classification\", \"description\": \"Classification of the observable\", \"default\": \"private\", \"enum\": [", "8 }, \"email_mapping\": { \"type\": \"string\", \"title\": \"Email Mapping\", \"description\":", "6 }, \"severity\": { \"type\": \"string\", \"title\": \"Severity\", \"description\": \"Severity", "IDs e.g [1,2,3]\", \"items\": { \"type\": \"integer\" }, \"order\": 12", "between the amount of the source confidence of each observable", "{ \"file\": { \"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\": \"File of", "of when intelligence will expire on ThreatStream\", \"format\": \"date-time\", \"order\":", "3 }, \"source_confidence_weight\": { \"type\": \"integer\", \"title\": \"Source Confidence Weight\",", "{ \"content\": { \"type\": \"string\", \"title\": \"Content\", \"description\": \"File contents\",", "\"title\": \"Variables\", \"properties\": { \"results\": { \"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\",", "\"type\": \"string\", \"title\": \"Threat Type\", \"description\": \"Type of threat associated", "list of comma-separated IDs e.g [1,2,3]\", \"items\": { \"type\": \"integer\"", "10 }, \"expiration_ts\": { \"type\": \"string\", \"title\": \"Expiration Time Stamp\",", "into Anomali ThreatStream with approval\" class Input: FILE = \"file\"", "\"Severity\", \"description\": \"Severity you want to assign to the observable", "\"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\": \"Settings needed for importing", "associated 
with an observable\", \"order\": 9 } }, \"required\": [", "2 }, \"threat_type\": { \"type\": \"string\", \"title\": \"Threat Type\", \"description\":", "is imported\", \"default\": \"\", \"enum\": [ \"low\", \"medium\", \"high\", \"very-high\",", "\"type\": \"string\", \"title\": \"IP Mapping\", \"description\": \"Indicator type to assign", "data to multiple trusted circles, enter the list of comma-separated", "to multiple trusted circles, enter the list of comma-separated IDs", "threat data should be imported. If you want to import", "\"type\": \"string\", \"title\": \"Job ID\", \"description\": \"Job ID\", \"order\": 1", "\"integer\", \"title\": \"Confidence\", \"description\": \"Confidence value assigned to the observable.", "\"md5_mapping\": { \"type\": \"string\", \"title\": \"MD5 Mapping\", \"description\": \"Indicator type", "observable\", \"order\": 9 } }, \"required\": [ \"classification\" ] }", "\"type\": \"integer\", \"title\": \"Confidence\", \"description\": \"Confidence value assigned to the", "when it is imported\", \"default\": \"\", \"enum\": [ \"low\", \"medium\",", "observable\", \"default\": \"private\", \"enum\": [ \"public\", \"private\" ], \"order\": 4", "{ \"type\": \"integer\", \"title\": \"Confidence\", \"description\": \"Confidence value assigned to", "\"url_mapping\": { \"type\": \"string\", \"title\": \"URL Mapping\", \"description\": \"Indicator type", "= \"observable_settings\" class Output: RESULTS = \"results\" class ImportObservableInput(komand.Input): schema", "1 }, \"domain_mapping\": { \"type\": \"string\", \"title\": \"Domain Mapping\", \"description\":", "associated with the imported observables\", \"order\": 13 }, \"trustedcircles\": {", "\"title\": \"Import Session ID\", \"description\": \"ID for import session\", \"order\":", "}, \"job_id\": { \"type\": \"string\", \"title\": \"Job ID\", \"description\": \"Job", "the Tags column of the ThreatStream UI e.g ['note1', 'note2',", "\"order\": 8 }, \"email_mapping\": { \"type\": 
\"string\", \"title\": \"Email Mapping\",", "\"Confidence\", \"description\": \"Confidence value assigned to the observable. Confidence score", "observable when it is imported\", \"default\": \"\", \"enum\": [ \"low\",", "assigned to the observable. Confidence score can range from 0-100,", "not associated with an observable\", \"order\": 9 } }, \"required\":", "} } }, \"observable_settings\": { \"type\": \"object\", \"title\": \"observable_settings\", \"properties\":", "associated with an observable\", \"order\": 8 }, \"email_mapping\": { \"type\":", "\"#/definitions/file\", \"title\": \"File\", \"description\": \"File of data to be imported", "stamp of when intelligence will expire on ThreatStream\", \"format\": \"date-time\",", "\"ip_mapping\": { \"type\": \"string\", \"title\": \"IP Mapping\", \"description\": \"Indicator type", "\"order\": 13 }, \"trustedcircles\": { \"type\": \"array\", \"title\": \"Trusted Circles\",", "\"description\": \"Classification of the observable\", \"default\": \"private\", \"enum\": [ \"public\",", "threat associated with the imported observables\", \"order\": 13 }, \"trustedcircles\":", "komand import json class Component: DESCRIPTION = \"Import observable(s) into", "Output: RESULTS = \"results\" class ImportObservableInput(komand.Input): schema = json.loads(\"\"\" {", "\"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"results\": { \"$ref\": \"#/definitions/import_observable_response\",", "class Component: DESCRIPTION = \"Import observable(s) into Anomali ThreatStream with", "\"Filename\", \"description\": \"Name of file\" } } }, \"observable_settings\": {", "\"order\": 1 }, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\",", "\"Specifies the ratio between the amount of the source confidence", "ID\", \"order\": 1 }, \"success\": { \"type\": \"boolean\", \"title\": \"Success\",", "\"order\": 9 } }, \"required\": [ \"classification\" ] } }", "\"IP 
Mapping\", \"description\": \"Indicator type to assign if a specific", "is not associated with an observable\", \"order\": 10 }, \"expiration_ts\":", "\"medium\", \"high\", \"very-high\", \"\" ], \"order\": 3 }, \"source_confidence_weight\": {", "class ImportObservableInput(komand.Input): schema = json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\",", "\"title\": \"Filename\", \"description\": \"Name of file\" } } }, \"observable_settings\":", "\"description\": \"Confidence value assigned to the observable. Confidence score can", "}, \"confidence\": { \"type\": \"integer\", \"title\": \"Confidence\", \"description\": \"Confidence value", "comma-separated IDs e.g [1,2,3]\", \"items\": { \"type\": \"integer\" }, \"order\":", "approval\" class Input: FILE = \"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class", "\"low\", \"medium\", \"high\", \"very-high\", \"\" ], \"order\": 3 }, \"source_confidence_weight\":", "\"private\", \"enum\": [ \"public\", \"private\" ], \"order\": 4 }, \"confidence\":", "increasing order of confidence\", \"order\": 1 }, \"domain_mapping\": { \"type\":", "DESCRIPTION = \"Import observable(s) into Anomali ThreatStream with approval\" class", "schema = json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\": {", "\"File Object\", \"properties\": { \"content\": { \"type\": \"string\", \"title\": \"Content\",", "FILE = \"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class Output: RESULTS =", "{ \"type\": \"string\", \"title\": \"Severity\", \"description\": \"Severity you want to", "column of the ThreatStream UI e.g ['note1', 'note2', 'note3']\", \"items\":", "with an observable\", \"order\": 7 }, \"md5_mapping\": { \"type\": \"string\",", "\"\" ], \"order\": 3 }, \"source_confidence_weight\": { \"type\": \"integer\", \"title\":", "in increasing order of confidence\", \"order\": 1 }, \"domain_mapping\": {", "\"description\": \"Severity you want to assign to the observable when", "{ 
\"type\": \"string\", \"title\": \"Threat Type\", \"description\": \"Type of threat", "\"order\": 6 }, \"severity\": { \"type\": \"string\", \"title\": \"Severity\", \"description\":", "\"Severity you want to assign to the observable when it", "This information is displayed in the Tags column of the", "approval\", \"order\": 2 } }, \"required\": [ \"file\" ], \"definitions\":", "Time Stamp\", \"displayType\": \"date\", \"description\": \"Time stamp of when intelligence", "\"Confidence value assigned to the observable. Confidence score can range", "\"source_confidence_weight\": { \"type\": \"integer\", \"title\": \"Source Confidence Weight\", \"description\": \"Specifies", "11 }, \"notes\": { \"type\": \"array\", \"title\": \"Notes\", \"description\": \"Additional", "Anomali ThreatStream\", \"order\": 1 }, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\":", "multiple trusted circles, enter the list of comma-separated IDs e.g", "[ \"public\", \"private\" ], \"order\": 4 }, \"confidence\": { \"type\":", "\"properties\": { \"content\": { \"type\": \"string\", \"title\": \"Content\", \"description\": \"File", "specific type is not associated with an observable\", \"order\": 10", "\"description\": \"ID for import session\", \"order\": 3 }, \"job_id\": {", "{ \"type\": \"string\", \"title\": \"Content\", \"description\": \"File contents\", \"format\": \"bytes\"", "{ \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\": \"Settings needed for", "\"\", \"enum\": [ \"low\", \"medium\", \"high\", \"very-high\", \"\" ], \"order\":", "of the trusted circle to which this threat data should", "\"string\", \"title\": \"Classification\", \"description\": \"Classification of the observable\", \"default\": \"private\",", "\"title\": \"Expiration Time Stamp\", \"displayType\": \"date\", \"description\": \"Time stamp of", "5 }, \"ip_mapping\": { \"type\": \"string\", \"title\": \"IP Mapping\", 
\"description\":", "was successful\", \"order\": 2 } } } } } \"\"\")", "an observable\", \"order\": 11 }, \"notes\": { \"type\": \"array\", \"title\":", "the ThreatStream confidence\", \"order\": 2 }, \"threat_type\": { \"type\": \"string\",", "\"Results\", \"description\": \"Results from importing observable(s)\", \"order\": 1 } },", "importing observable(s)\", \"order\": 1 } }, \"definitions\": { \"import_observable_response\": {", "to be imported into Anomali ThreatStream\", \"order\": 1 }, \"observable_settings\":", "{ \"classification\": { \"type\": \"string\", \"title\": \"Classification\", \"description\": \"Classification of", "\"items\": { \"type\": \"string\" }, \"order\": 6 }, \"severity\": {", "\"Source Confidence Weight\", \"description\": \"Specifies the ratio between the amount", "\"Content\", \"description\": \"File contents\", \"format\": \"bytes\" }, \"filename\": { \"type\":", "\"description\": \"Additional details for the observable. This information is displayed", "= \"file\" OBSERVABLE_SETTINGS = \"observable_settings\" class Output: RESULTS = \"results\"", "{ \"type\": \"object\", \"title\": \"observable_settings\", \"properties\": { \"classification\": { \"type\":", "} } \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema", "session\", \"order\": 3 }, \"job_id\": { \"type\": \"string\", \"title\": \"Job", "{ \"type\": \"string\", \"title\": \"Filename\", \"description\": \"Name of file\" }", "it is imported\", \"default\": \"\", \"enum\": [ \"low\", \"medium\", \"high\",", "\"integer\", \"title\": \"Source Confidence Weight\", \"description\": \"Specifies the ratio between", "}, \"source_confidence_weight\": { \"type\": \"integer\", \"title\": \"Source Confidence Weight\", \"description\":", "{ \"type\": \"string\", \"title\": \"Job ID\", \"description\": \"Job ID\", \"order\":", "observable(s)\", \"order\": 1 } }, \"definitions\": { \"import_observable_response\": 
{ \"type\":", "\"file\": { \"id\": \"file\", \"type\": \"object\", \"title\": \"File\", \"description\": \"File", "{ \"type\": \"string\", \"title\": \"IP Mapping\", \"description\": \"Indicator type to", "\"default\": \"private\", \"enum\": [ \"public\", \"private\" ], \"order\": 4 },", "\"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"file\": { \"$ref\": \"#/definitions/file\",", "{ \"import_session_id\": { \"type\": \"string\", \"title\": \"Import Session ID\", \"description\":", "\"type\": \"string\" }, \"order\": 6 }, \"severity\": { \"type\": \"string\",", "of the source confidence of each observable and the ThreatStream", "2 } } } } } \"\"\") def __init__(self): super(self.__class__,", "\"description\": \"If import was successful\", \"order\": 2 } } }", "enter the list of comma-separated IDs e.g [1,2,3]\", \"items\": {", "] } } } \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema) class", "of file\" } } }, \"observable_settings\": { \"type\": \"object\", \"title\":", "type is not associated with an observable\", \"order\": 8 },", "e.g [1,2,3]\", \"items\": { \"type\": \"integer\" }, \"order\": 12 },", "}, \"threat_type\": { \"type\": \"string\", \"title\": \"Threat Type\", \"description\": \"Type", "\"properties\": { \"results\": { \"$ref\": \"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results", "\"type\": \"object\", \"title\": \"observable_settings\", \"properties\": { \"classification\": { \"type\": \"string\",", "DO NOT EDIT import komand import json class Component: DESCRIPTION", "}, \"severity\": { \"type\": \"string\", \"title\": \"Severity\", \"description\": \"Severity you", "an observable that needs approval\", \"order\": 2 } }, \"required\":", "{ \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"results\": { \"$ref\":", "\"ID of the trusted circle to which this threat data", "specific type is not associated with an observable\", \"order\": 
8", "\"array\", \"title\": \"Notes\", \"description\": \"Additional details for the observable. This", "the observable. This information is displayed in the Tags column", "\"private\" ], \"order\": 4 }, \"confidence\": { \"type\": \"integer\", \"title\":", "into Anomali ThreatStream\", \"order\": 1 }, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\",", "\"classification\" ] } } } \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema)", "details for the observable. This information is displayed in the", "\"type\": \"array\", \"title\": \"Notes\", \"description\": \"Additional details for the observable.", "\"order\": 10 }, \"expiration_ts\": { \"type\": \"string\", \"title\": \"Expiration Time", "will expire on ThreatStream\", \"format\": \"date-time\", \"order\": 5 }, \"ip_mapping\":", "__init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" { \"type\":", "ThreatStream\", \"order\": 1 }, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable", "[ \"classification\" ] } } } \"\"\") def __init__(self): super(self.__class__,", "\"#/definitions/import_observable_response\", \"title\": \"Results\", \"description\": \"Results from importing observable(s)\", \"order\": 1", "\"title\": \"Severity\", \"description\": \"Severity you want to assign to the", "\"title\": \"observable_settings\", \"properties\": { \"classification\": { \"type\": \"string\", \"title\": \"Classification\",", "\"file\": { \"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\": \"File of data", "\"date\", \"description\": \"Time stamp of when intelligence will expire on", "ThreatStream\", \"format\": \"date-time\", \"order\": 5 }, \"ip_mapping\": { \"type\": \"string\",", "data to be imported into Anomali ThreatStream\", \"order\": 1 },", "\"Domain Mapping\", \"description\": \"Indicator type to assign if a 
specific", "super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output): schema = json.loads(\"\"\" { \"type\": \"object\",", "Object\", \"properties\": { \"content\": { \"type\": \"string\", \"title\": \"Content\", \"description\":", "to the observable. Confidence score can range from 0-100, in", "\"string\", \"title\": \"MD5 Mapping\", \"description\": \"Indicator type to assign if", "\"type\": \"string\", \"title\": \"Classification\", \"description\": \"Classification of the observable\", \"default\":", "\"description\": \"Job ID\", \"order\": 1 }, \"success\": { \"type\": \"boolean\",", "\"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\": \"File of data to be", "not associated with an observable\", \"order\": 10 }, \"expiration_ts\": {", "associated with an observable\", \"order\": 7 }, \"md5_mapping\": { \"type\":", "import was successful\", \"order\": 2 } } } } }", "}, \"definitions\": { \"import_observable_response\": { \"type\": \"object\", \"title\": \"import_observable_response\", \"properties\":", "order of confidence\", \"order\": 1 }, \"domain_mapping\": { \"type\": \"string\",", "\"properties\": { \"classification\": { \"type\": \"string\", \"title\": \"Classification\", \"description\": \"Classification", "\"order\": 2 }, \"threat_type\": { \"type\": \"string\", \"title\": \"Threat Type\",", "observable that needs approval\", \"order\": 2 } }, \"required\": [", "{ \"type\": \"string\", \"title\": \"Classification\", \"description\": \"Classification of the observable\",", "], \"order\": 4 }, \"confidence\": { \"type\": \"integer\", \"title\": \"Confidence\",", "is not associated with an observable\", \"order\": 8 }, \"email_mapping\":", "the imported observables\", \"order\": 13 }, \"trustedcircles\": { \"type\": \"array\",", "observable. 
This information is displayed in the Tags column of", "\"order\": 7 }, \"md5_mapping\": { \"type\": \"string\", \"title\": \"MD5 Mapping\",", "If you want to import the threat data to multiple", "associated with an observable\", \"order\": 10 }, \"expiration_ts\": { \"type\":", "\"string\", \"title\": \"URL Mapping\", \"description\": \"Indicator type to assign if", "score can range from 0-100, in increasing order of confidence\",", "that needs approval\", \"order\": 2 } }, \"required\": [ \"file\"", "\"items\": { \"type\": \"integer\" }, \"order\": 12 }, \"url_mapping\": {", "file\" } } }, \"observable_settings\": { \"type\": \"object\", \"title\": \"observable_settings\",", "Anomali ThreatStream with approval\" class Input: FILE = \"file\" OBSERVABLE_SETTINGS", "Mapping\", \"description\": \"Indicator type to assign if a specific type", "is not associated with an observable\", \"order\": 9 } },", "1 }, \"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\":", "\"order\": 3 }, \"job_id\": { \"type\": \"string\", \"title\": \"Job ID\",", "\"If import was successful\", \"order\": 2 } } } }", "\"observable_settings\", \"properties\": { \"classification\": { \"type\": \"string\", \"title\": \"Classification\", \"description\":", "\"Threat Type\", \"description\": \"Type of threat associated with the imported", "1 } }, \"definitions\": { \"import_observable_response\": { \"type\": \"object\", \"title\":", "\"default\": \"\", \"enum\": [ \"low\", \"medium\", \"high\", \"very-high\", \"\" ],", "\"Additional details for the observable. 
This information is displayed in", "'note3']\", \"items\": { \"type\": \"string\" }, \"order\": 6 }, \"severity\":", "\"Name of file\" } } }, \"observable_settings\": { \"type\": \"object\",", "\"severity\": { \"type\": \"string\", \"title\": \"Severity\", \"description\": \"Severity you want", "Type\", \"description\": \"Type of threat associated with the imported observables\",", "json.loads(\"\"\" { \"type\": \"object\", \"title\": \"Variables\", \"properties\": { \"file\": {", "\"string\", \"title\": \"Threat Type\", \"description\": \"Type of threat associated with", "{ \"type\": \"string\", \"title\": \"URL Mapping\", \"description\": \"Indicator type to", "\"title\": \"Observable Settings\", \"description\": \"Settings needed for importing an observable", "{ \"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\": \"File of data to", "\"import_session_id\": { \"type\": \"string\", \"title\": \"Import Session ID\", \"description\": \"ID", "\"observable_settings\": { \"$ref\": \"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\": \"Settings needed", "SDK - DO NOT EDIT import komand import json class", "\"type\": \"string\", \"title\": \"Expiration Time Stamp\", \"displayType\": \"date\", \"description\": \"Time", "\"Variables\", \"properties\": { \"file\": { \"$ref\": \"#/definitions/file\", \"title\": \"File\", \"description\":", "\"title\": \"Classification\", \"description\": \"Classification of the observable\", \"default\": \"private\", \"enum\":", "for the observable. 
This information is displayed in the Tags", "json class Component: DESCRIPTION = \"Import observable(s) into Anomali ThreatStream", "of confidence\", \"order\": 1 }, \"domain_mapping\": { \"type\": \"string\", \"title\":", "\"Expiration Time Stamp\", \"displayType\": \"date\", \"description\": \"Time stamp of when", "} } } \"\"\") def __init__(self): super(self.__class__, self).__init__(self.schema) class ImportObservableOutput(komand.Output):", "{ \"type\": \"string\", \"title\": \"MD5 Mapping\", \"description\": \"Indicator type to", "if a specific type is not associated with an observable\",", "\"title\": \"Notes\", \"description\": \"Additional details for the observable. This information", "\"boolean\", \"title\": \"Success\", \"description\": \"If import was successful\", \"order\": 2", "7 }, \"md5_mapping\": { \"type\": \"string\", \"title\": \"MD5 Mapping\", \"description\":", "e.g ['note1', 'note2', 'note3']\", \"items\": { \"type\": \"string\" }, \"order\":", "1 }, \"success\": { \"type\": \"boolean\", \"title\": \"Success\", \"description\": \"If", "\"type\": \"integer\" }, \"order\": 12 }, \"url_mapping\": { \"type\": \"string\",", "of comma-separated IDs e.g [1,2,3]\", \"items\": { \"type\": \"integer\" },", "type is not associated with an observable\", \"order\": 11 },", "\"description\": \"File Object\", \"properties\": { \"content\": { \"type\": \"string\", \"title\":", "\"date-time\", \"order\": 5 }, \"ip_mapping\": { \"type\": \"string\", \"title\": \"IP", "4 }, \"confidence\": { \"type\": \"integer\", \"title\": \"Confidence\", \"description\": \"Confidence", "\"filename\": { \"type\": \"string\", \"title\": \"Filename\", \"description\": \"Name of file\"", "\"description\": \"Specifies the ratio between the amount of the source", "the observable. 
Confidence score can range from 0-100, in increasing", "importing an observable that needs approval\", \"order\": 2 } },", "\"expiration_ts\": { \"type\": \"string\", \"title\": \"Expiration Time Stamp\", \"displayType\": \"date\",", "\"type\": \"string\", \"title\": \"Email Mapping\", \"description\": \"Indicator type to assign", "], \"definitions\": { \"file\": { \"id\": \"file\", \"type\": \"object\", \"title\":", "KOMAND SDK - DO NOT EDIT import komand import json", "\"object\", \"title\": \"import_observable_response\", \"properties\": { \"import_session_id\": { \"type\": \"string\", \"title\":", "when intelligence will expire on ThreatStream\", \"format\": \"date-time\", \"order\": 5", "\"#/definitions/observable_settings\", \"title\": \"Observable Settings\", \"description\": \"Settings needed for importing an", "contents\", \"format\": \"bytes\" }, \"filename\": { \"type\": \"string\", \"title\": \"Filename\",", "}, \"trustedcircles\": { \"type\": \"array\", \"title\": \"Trusted Circles\", \"description\": \"ID", "}, \"required\": [ \"classification\" ] } } } \"\"\") def", "observable\", \"order\": 10 }, \"expiration_ts\": { \"type\": \"string\", \"title\": \"Expiration", "\"string\", \"title\": \"Severity\", \"description\": \"Severity you want to assign to", "to the observable when it is imported\", \"default\": \"\", \"enum\":", "}, \"notes\": { \"type\": \"array\", \"title\": \"Notes\", \"description\": \"Additional details", "\"high\", \"very-high\", \"\" ], \"order\": 3 }, \"source_confidence_weight\": { \"type\":", "\"Job ID\", \"order\": 1 }, \"success\": { \"type\": \"boolean\", \"title\":", "\"type\": \"string\", \"title\": \"URL Mapping\", \"description\": \"Indicator type to assign", "type is not associated with an observable\", \"order\": 9 }", "\"Job ID\", \"description\": \"Job ID\", \"order\": 1 }, \"success\": {", "\"Results from importing observable(s)\", \"order\": 1 } }, \"definitions\": {", "{ \"file\": { \"id\": \"file\", \"type\": 
\"object\", \"title\": \"File\", \"description\":", "\"integer\" }, \"order\": 12 }, \"url_mapping\": { \"type\": \"string\", \"title\":", "\"Trusted Circles\", \"description\": \"ID of the trusted circle to which", "\"notes\": { \"type\": \"array\", \"title\": \"Notes\", \"description\": \"Additional details for" ]
[ "reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all", "self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)", "= {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1,", "tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource':", "QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock()", "self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved)", "from trove.quota.quota import DbQuotaDriver from trove.quota.models import Resource from trove.quota.models", "quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit)", "self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self):", 
"self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1,", "body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200, result.status)", "FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK,", "def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota", "reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all", "permissions and limitations # under the License. 
import testtools from", "None, 'volumes': 10}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota,", "when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances': None, 'volumes': 10}}", "CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self):", "resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all", "self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self):", "reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances':", "test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all", "self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved)", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1,", "= Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create =", 
"test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)", "req = mock() req.environ = mock() when(req.environ).get(any()).thenReturn(context) self.req = req", "= Reservation.save self.mock_quota_result = Mock() self.mock_usage_result = Mock() Quota.find_all =", "of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless", "self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4,", "specific language governing permissions and limitations # under the License.", "self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig", "self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)]", "# not use this file except in compliance with the", "usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0,", "test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas': {'instances':", "= self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, 
defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS =", "usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS", "trove.quota.models import Resource from trove.quota.models import Quota from trove.quota.models import", "QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status)", "self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result", "FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED,", "in compliance with the License. 
You may obtain # a", "= self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def", "when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances': 2}} result =", "super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}}", "quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,", "def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)", "You may obtain # a copy of the License at", "usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA", "Mock() delta = {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta)", "resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta'])", "self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS =", "+ 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS", "Mock(return_value=self.mock_quota_result) 
QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all =", "Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use)", "Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12,", "2012 OpenStack Foundation # # Licensed under the Apache License,", "trove.quota.quota import run_with_quotas from trove.quota.quota import QUOTAS \"\"\" Unit tests", "10}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances'", "self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2,", "NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create", "tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all =", "self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self): f", "the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required", "self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, 
FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save =", "result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support is not enabled')", "in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save =", "delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED,", "usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use)", "when(req.environ).get(any()).thenReturn(context) self.req = req self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest,", "= Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status)", "self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "import run_with_quotas from trove.quota.quota import QUOTAS \"\"\" Unit tests for", "under the License is distributed on an \"AS IS\" BASIS,", "import testtools from mockito import mock, when, unstub, any, verify,", "resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) 
quotas =", "self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback", "test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,", "{'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def", "class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() context = mock() context.is_admin", "the License. import testtools from mockito import mock, when, unstub,", "context = mock() context.is_admin = True req = mock() req.environ", "= Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)", "Reservation.save self.mock_quota_result = Mock() self.mock_usage_result = Mock() Quota.find_all = Mock(return_value=self.mock_quota_result)", "FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "= QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback =", "FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status)", "quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all", "def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback = 
self.quota_rollback_orig", "tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "= self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1,", "kw['status']) def test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes': 2000, 'Fake_resource':", "self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume", "usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS", "quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,", "'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS", "kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save = Mock()", "this file except in compliance with the License. 
You may", "self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create", "result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources)", "usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1,", "self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS =", "{'quotas': {'instances': None}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota,", "FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota =", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock() delta", "super(QuotaControllerTest, self).setUp() context = mock() context.is_admin = True req =", "2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self):", "Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all =", "self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1),", "delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, 
resource=Resource.INSTANCES, in_use=1, reserved=2),", "functions in DbQuotaDriver.py. \"\"\" CONF = cfg.CONF resources = {", "= Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource)", "self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self):", "quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all =", "= Mock() delta = {'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources,", "def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all", "software # distributed under the License is distributed on an", "(the \"License\"); you may # not use this file except", "= Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource)", "FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, 
Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id)", "_, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])", "resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys())", "FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save", "'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS", "usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1,", "Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user,", "usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1,", "'volumes': 10}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save()", "file except in compliance with the License. You may obtain", "# under the License. 
import testtools from mockito import mock,", "delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5,", "self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit)", "def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all =", "= Mock() QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve", "FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all =", "= QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve =", "Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save", "delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0),", "in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages =", "= QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) 
QuotaUsage.create =", "OR CONDITIONS OF ANY KIND, either express or implied. See", "the specific language governing permissions and limitations # under the", "= cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES,", "DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.common import exception from", "test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", "quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit)", "def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback", "= self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0,", "under the Apache License, Version 2.0 (the \"License\"); you may", "kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _,", "resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "= Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called)", "= self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, 
quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def", "= Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource)", "= Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user,", "req self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def", "test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas", "= True req = mock() req.environ = mock() when(req.environ).get(any()).thenReturn(context) self.req", "self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes': 2000,", "= Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save =", "resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES)", "-1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0]", "f) 
self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=Exception())", "{'instances': None}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save()", "test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas':", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all", "test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id)", "self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0),", "reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "\"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY", "Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)]", "\"654321\" class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = 
QUOTAS.reserve", "= Mock() QUOTAS.rollback = Mock() QUOTAS.commit = Mock() def tearDown(self):", "self.quota_commit_orig def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes':", "[QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all =", "when, unstub, any, verify, never, times from mock import Mock", "quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,", "'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def", "= Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock() delta =", "at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable", "= Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve,", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50,", "self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self):", "to in writing, software # distributed under the License is", "self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = 
Reservation.create.call_args_list[1]", "any, verify, never, times from mock import Mock from trove.quota.quota", "= [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,", "def test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123}", "Mock() delta = {'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta)", "= DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by =", "self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all =", "self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA =", "FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id'])", "from trove.quota.quota import QUOTAS \"\"\" Unit tests for the classes", "usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1,", "body = {'quotas': {'instances': 2}} result = self.controller.update(self.req, body, FAKE_TENANT1,", "FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def 
test_rollback(self): Reservation.save = Mock() QuotaUsage.save =", "or agreed to in writing, software # distributed under the", "self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_rollback(self):", "usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use)", "required by applicable law or agreed to in writing, software", "Mock() QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve =", "unstub() def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update,", "from trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models", "= mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances': 2}}", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys())", "self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved)", "self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5,", "self.orig_QuotaUsage_create QuotaUsage.save = 
self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults", "Unit tests for the classes and functions in DbQuotaDriver.py. \"\"\"", "Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0,", "for the classes and functions in DbQuotaDriver.py. \"\"\" CONF =", "Reservation.create = Mock() delta = {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1,", "= \"654321\" class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig =", "or implied. See the # License for the specific language", "self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved)", "in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)]", "Mock() Reservation.create = Mock() delta = {'instances': -1, 'volumes': -3}", "= Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback", "resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "Apache License, Version 2.0 (the \"License\"); you may # not", "= QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save =", "QuotaUsage(tenant_id=FAKE_TENANT1, 
resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA)", "{'quotas': {'instances': 2}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota,", "= self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save =", "self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)]", "quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas =", "agreed to in writing, software # distributed under the License", "reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages =", "Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create", "in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save =", "usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1,", "kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3,", "Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", 
"self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self):", "distributed under the License is distributed on an \"AS IS\"", "self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta = {'instances': 10,", "FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig", "resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2,", "quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS", "Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys())", "usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS", "Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, 
quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def", "trove.extensions.mgmt.quota.service import QuotaController from trove.common import exception from trove.common import", "License, Version 2.0 (the \"License\"); you may # not use", "CONDITIONS OF ANY KIND, either express or implied. See the", "test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f)", "= Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1,", "def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2,", "usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2),", "reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create =", "self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status)", "not use this file except in compliance with the License.", "def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def", "= { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } 
FAKE_TENANT1", "test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota", "Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)", "self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create", "writing, software # distributed under the License is distributed on", "_, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all", "self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1,", "reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id)", "= Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1,", "mock import Mock from trove.quota.quota import 
DbQuotaDriver from trove.quota.models import", "Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called)", "quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15,", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all", "the License. You may obtain # a copy of the", "DbQuotaDriver from trove.quota.models import Resource from trove.quota.models import Quota from", "an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF", "def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)", "use this file except in compliance with the License. You", "CONF = cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES:", "context.is_admin = True req = mock() req.environ = mock() when(req.environ).get(any()).thenReturn(context)", "License. import testtools from mockito import mock, when, unstub, any,", "under the License. 
import testtools from mockito import mock, when,", "[QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,", "self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES,", "self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support is not enabled') def", "self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS =", "self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved)", "self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve", "= self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1,", "self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self):", "QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, 
usage.tenant_id) self.assertEquals(Resource.VOLUMES,", "resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS =", "'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def", "self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self):", "= self.quota_commit_orig def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1,", "= QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self): body", "= {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw", "in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage =", "in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", "self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,", "QUOTAS.rollback = Mock() QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown()", "self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, 
quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all =", "[Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas", "QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status)", "\"\"\" CONF = cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'),", "self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() context", "quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3,", "resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1,", "Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0,", "'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1,", "Mock() self.mock_usage_result = Mock() Quota.find_all = 
Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result)", "quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota =", "resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1,", "self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "License is distributed on an \"AS IS\" BASIS, WITHOUT #", "KIND, either express or implied. See the # License for", "resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES,", "2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0]", "self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", "super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig =", "test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", 
"self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2,", "never, times from mock import Mock from trove.quota.quota import DbQuotaDriver", "body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota)", "\"License\"); you may # not use this file except in", "self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use)", "= self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1,", "usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1,", "IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND,", "resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all =", "trove.quota.models import QuotaUsage from trove.quota.models import Reservation from trove.db.models import", "self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota)", "{'unknown_resource': 5}} 
self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self):", "express or implied. See the # License for the specific", "self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id)", "delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve,", "the Apache License, Version 2.0 (the \"License\"); you may #", "= Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource)", "self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)", "self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self):", "self.mock_quota_result = Mock() self.mock_usage_result = Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all", "in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = 
self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1,", "when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas': {'instances': None}} result = self.controller.update(self.req,", "status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use)", "never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes'])", "tests for the classes and functions in DbQuotaDriver.py. \"\"\" CONF", "5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self):", "= {'instances': 10, 'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1,", "See the # License for the specific language governing permissions", "QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self): f =", "self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create", "from trove.extensions.mgmt.quota.service import QuotaController from trove.common import exception from trove.common", "quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[])", "reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] 
self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all", "resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta", "class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all", "def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2,", "body = {'quotas': {'instances': None}} result = self.controller.update(self.req, body, FAKE_TENANT1,", "= self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create =", "quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)]", "FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1,", "# a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0", "Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5,", "= Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes':", "FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, 
FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save = Mock()", "FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances'])", "resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all =", "def test_commit(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS =", "trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.common import", "# Copyright 2012 OpenStack Foundation # # Licensed under the", "tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances': 2}} result = self.controller.update(self.req,", "Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig =", "def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all =", "= mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2,", "QuotaController from trove.common import exception from trove.common import cfg from", "req.environ = mock() when(req.environ).get(any()).thenReturn(context) self.req = req self.controller = QuotaController()", "super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit =", "reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) 
self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)", "resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta'])", "import QUOTAS \"\"\" Unit tests for the classes and functions", "resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save", "Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5, 'volumes': 3}", "= mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas': {'instances': None}} result", "law or agreed to in writing, software # distributed under", "self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200,", "body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota", "exception from trove.common import cfg from trove.quota.quota import run_with_quotas from", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)]", "self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1,", "cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'),", "self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS =", "self.assertEquals(Resource.INSTANCES, 
quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit)", "in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[])", "QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() context = mock() context.is_admin =", "= self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in result._data['quotas'])", "in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[])", "implied. See the # License for the specific language governing", "def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all", "QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create", "4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self):", "{'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def", "self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock() self.mock_usage_result", "usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, 
usage.resource) self.assertEquals(3, usage.in_use)", "5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self):", "mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota)", "FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)]", "in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages", "FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by =", "self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp()", "self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create", "run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called)", "self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use) self.assertEqual(0,", "f = Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 
1, 'volumes': 5},", "Resource from trove.quota.models import Quota from trove.quota.models import QuotaUsage from", "self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota = mock(Quota)", "in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create", "FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)]", "{'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def", "self).setUp() context = mock() context.is_admin = True req = mock()", "not enabled') def test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota)", "resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances': None, 'volumes': 10}} result =", "times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest,", "self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all", "QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock()", "usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all", "= Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) 
self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3,", "'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1,", "2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS =", "and limitations # under the License. import testtools from mockito", "reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all", "import Reservation from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController", "{'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw =", "usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES,", "usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0,", "QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,", "1, 'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta)", "self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) 
self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit)", "def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req,", "body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save()", "QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all", "self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas': {'instances': None}}", "self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1,", "self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1,", "hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,", "[QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all =", "testtools from mockito import mock, when, unstub, any, verify, never,", "usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) 
self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def", "class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig", "delta = {'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta) _,", "self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta'])", "Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1,", "FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def", "unstub, any, verify, never, times from mock import Mock from", "trove.quota.quota import DbQuotaDriver from trove.quota.models import Resource from trove.quota.models import", "= Mock() self.mock_usage_result = Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all =", "quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all", "Reservation.create = Mock() delta = {'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1,", "{'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw =", "self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = 
Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id'])", "QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all =", "usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,", "QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS)", "in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all =", "status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use)", "times from mock import Mock from trove.quota.quota import DbQuotaDriver from", "_, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])", "CONF.trove_volume_support, 'Volume support is not enabled') def test_update_resource_volume(self): instance_quota =", "trove.common import cfg from trove.quota.quota import run_with_quotas from trove.quota.quota import", "= {'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta)", "tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "support is not enabled') def test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by(", "from mock import Mock from 
trove.quota.quota import DbQuotaDriver from trove.quota.models", "[Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES)", "# # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law", "distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR", "= self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self):", "self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def", "# # Licensed under the Apache License, Version 2.0 (the", "self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta =", "self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1,", "resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages", "FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called)", "{ Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 =", "self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() 
context =", "test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", "in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[])", "self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called)", "QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save", "= self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create =", "resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages", "Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1,", "= self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources)", "= self.quota_reserve_orig QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self):", "QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save", "in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[])", "QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = 
Mock(return_value=FAKE_QUOTA)", "obtain # a copy of the License at # #", "FAKE_TENANT2) def test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body =", "Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save", "mockito import mock, when, unstub, any, verify, never, times from", "_, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status'])", "= Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all", "\"\"\" Unit tests for the classes and functions in DbQuotaDriver.py.", "Version 2.0 (the \"License\"); you may # not use this", "resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES,", "self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save", "from trove.common import cfg from trove.quota.quota import run_with_quotas from trove.quota.quota", "def test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota =", "self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock() self.mock_usage_result = Mock() Quota.find_all", 
"run_with_quotas from trove.quota.quota import QUOTAS \"\"\" Unit tests for the", "mock() when(req.environ).get(any()).thenReturn(context) self.req = req self.controller = QuotaController() def tearDown(self):", "Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig QUOTAS.rollback =", "tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by", "QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def", "= Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw", "OpenStack Foundation # # Licensed under the Apache License, Version", "FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body", "quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS", "kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED,", "self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)", "self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota =", "License for the 
specific language governing permissions and limitations #", "self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "= Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1}", "FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage", "reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1,", "Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1,", "resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas':", "on an \"AS IS\" BASIS, WITHOUT # WARRANTIES OR CONDITIONS", "Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id)", "= {'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta)", "= mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances': None,", "Copyright 2012 OpenStack Foundation # # Licensed under the Apache", "self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances':", "http://www.apache.org/licenses/LICENSE-2.0 # # Unless 
required by applicable law or agreed", "self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save = Mock() QuotaUsage.save = Mock()", "self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "enabled') def test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all", "Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded,", "status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK,", "Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1,", "self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user,", "self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use)", "= mock() req.environ = mock() 
when(req.environ).get(any()).thenReturn(context) self.req = req self.controller", "self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self):", "None}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200,", "self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all =", "def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2,", "quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys())", "body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1,", "= Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self):", "Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22,", "3} self.assertRaises(exception.QuotaExceeded, 
self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS =", "trove.quota.quota import QUOTAS \"\"\" Unit tests for the classes and", "quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22),", "FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS", "mock, when, unstub, any, verify, never, times from mock import", "= mock() when(req.environ).get(any()).thenReturn(context) self.req = req self.controller = QuotaController() def", "result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver", "= mock() context.is_admin = True req = mock() req.environ =", "self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp()", "usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use)", "Licensed under the Apache License, Version 2.0 (the \"License\"); you", "Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\" FAKE_TENANT2", "super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by =", "quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, 
quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit)", "= Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2,", "from trove.quota.quota import run_with_quotas from trove.quota.quota import QUOTAS \"\"\" Unit", "License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by", "= req self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub()", "delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve,", "= [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all", "self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by", "usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0,", "= Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1,", "\"123456\" FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest, self).setUp()", "result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in", "usages[Resource.VOLUMES].in_use) self.assertEquals(1, 
usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0,", "reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock()", "usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS", "the classes and functions in DbQuotaDriver.py. \"\"\" CONF = cfg.CONF", "self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota,", "quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def", "usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def", "QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES,", "resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "10, 'volumes': 2000, 'Fake_resource': 123} 
self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta)", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES,", "in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[])", "volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances':", "self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1,", "Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user", "compliance with the License. 
You may obtain # a copy", "resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10,", "Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id)", "self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save =", "QUOTAS.reserve = Mock() QUOTAS.rollback = Mock() QUOTAS.commit = Mock() def", "= Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create =", "delta = {'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources,", "result.status) def test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body", "test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", "QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig", "tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2,", "FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create", "from trove.common import exception from trove.common import cfg from trove.quota.quota", "verify(instance_quota, times=1).save() self.assertTrue('instances' 
in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not", "'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f", "self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved)", "= QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create =", "reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock()", "kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def", "Mock from trove.quota.quota import DbQuotaDriver from trove.quota.models import Resource from", "Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def", "'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)]", "import mock, when, unstub, any, verify, never, times from mock", "the # License for the specific language governing permissions and", "QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults 
=", "# # Unless required by applicable law or agreed to", "delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0),", "self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10,", "usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", "DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all =", "delta = {'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources,", "Mock() Reservation.create = Mock() delta = {'instances': 2, 'volumes': 3}", "resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_quota_result.all =", "mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances': 2}} result", "= self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0,", "verify(quota, never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by(", "usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, 
usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES,", "status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED,", "limitations # under the License. import testtools from mockito import", "= QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result =", "in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support", "quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[])", "QuotaUsage.save = Mock() Reservation.create = Mock() delta = {'instances': -1,", "'volumes': 3} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS", "delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved)", "= self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user,", "Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def 
tearDown(self): super(DbQuotaDriverTest,", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)]", "= self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status) def", "quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit)", "result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def", "Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(CONF.max_instances_per_user,", "self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS =", "import Mock from trove.quota.quota import DbQuotaDriver from trove.quota.models import Resource", "mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances': None, 'volumes':", "= QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback = Mock() QUOTAS.commit =", "self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback = Mock() QUOTAS.commit", "2.0 (the \"License\"); you may # not use this file", "def test_run_with_quotas_error(self): f = 
Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 1,", "tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body = {'quotas': {'instances': None, 'volumes': 10}} result", "123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota(self): FAKE_QUOTAS =", "self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved)", "self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit", "FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota = mock(Quota)", "def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2,", "classes and functions in DbQuotaDriver.py. \"\"\" CONF = cfg.CONF resources", "self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1,", "Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use)", "governing permissions and limitations # under the License. 
import testtools", "instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by(", "from trove.quota.models import Reservation from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service", "= \"123456\" FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase): def setUp(self): super(Run_with_quotasTest,", "verify, never, times from mock import Mock from trove.quota.quota import", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded,", "FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all =", "usages[Resource.VOLUMES].resource) self.assertEquals(1, usages[Resource.VOLUMES].in_use) self.assertEquals(1, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,", "= Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4, 'volumes': 2} self.assertRaises(exception.QuotaExceeded, self.driver.reserve,", "by applicable law or agreed to in writing, software #", "self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create = QuotaUsage.create self.orig_QuotaUsage_save = QuotaUsage.save self.orig_Reservation_save", "self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(15, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_all_default(self): self.mock_quota_result.all = Mock(return_value=[]) quotas", "= Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5, 'volumes':", "{'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) 
self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class", "import QuotaUsage from trove.quota.models import Reservation from trove.db.models import DatabaseModelBase", "test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body,", "mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas': {'instances': None}} result =", "-3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id'])", "reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by", "mock() context.is_admin = True req = mock() req.environ = mock()", "test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self):", "Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock()", "never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2,", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", "self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[0].status) self.assertEqual(3, FAKE_QUOTAS[1].in_use) self.assertEqual(0,", 
"[QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)", "setUp(self): super(Run_with_quotasTest, self).setUp() self.quota_reserve_orig = QUOTAS.reserve self.quota_rollback_orig = QUOTAS.rollback self.quota_commit_orig", "Reservation.save = self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES])", "BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either", "quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def", "QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED),", "delta = {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources, delta) _,", "QuotaUsage from trove.quota.models import Reservation from trove.db.models import DatabaseModelBase from", "instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances':", "self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self):", "resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage", "FAKE_TENANT1 = \"123456\" FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase): def 
setUp(self):", "in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages =", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", "kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _,", "def test_get_all_quota_usages_by_tenant_with_all_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id'])", "kw['status']) def test_commit(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS", "import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.common import exception", "= {'quotas': {'instances': 2}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2)", "Foundation # # Licensed under the Apache License, Version 2.0", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS", "1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self):", "2}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances'", "self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, 
quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS =", "self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES,", "QUOTAS.rollback self.quota_commit_orig = QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback = Mock()", "resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1,", "= self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas'])", "= Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource)", "test_commit(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1,", "FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1,", "may obtain # a copy of the License at #", "self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def 
test_get_all_quotas_by_tenant_with_one_default(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase):", "Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\"", "self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1,", "from trove.quota.models import Resource from trove.quota.models import Quota from trove.quota.models", "self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save = Mock() QuotaUsage.save", "delta) _, kw = Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED,", "Unless required by applicable law or agreed to in writing,", "[QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)]", "= Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource)", "def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5},", "QUOTAS \"\"\" Unit tests for the classes and functions in", "Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save", "= self.quota_rollback_orig QUOTAS.commit = 
self.quota_commit_orig def test_run_with_quotas(self): f = Mock()", "FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas =", "when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body", "resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0)] self.mock_usage_result.all = Mock(return_value=[])", "in_use=5, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=2)] FAKE_RESERVATIONS = [Reservation(usage_id=1,", "QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self): body =", "usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3, usage.in_use) self.assertEquals(1, usage.reserved) def test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA", "run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called)", "self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1]", "= Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource)", "= Mock() Reservation.create = Mock() delta = {'instances': 2, 'volumes':", "from mockito import mock, when, unstub, any, verify, never, times", "quota.hard_limit) def test_get_quota_by_tenant_default(self): 
self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES)", "= Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4, 'volumes':", "applicable law or agreed to in writing, software # distributed", "Mock(return_value=FAKE_QUOTAS) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(3,", "= {'quotas': {'instances': None, 'volumes': 10}} result = self.controller.update(self.req, body,", "= {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2)", "self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=Exception()) self.assertRaises(Exception,", "result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in", "self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys())", "Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta", "Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all", "= [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2, reserved=1), QuotaUsage(tenant_id=FAKE_TENANT1, 
resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_usage_result.all", "QuotaUsage.find_all = self.orig_QuotaUsage_find_all QuotaUsage.find_by = self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create", "trove.quota.models import Quota from trove.quota.models import QuotaUsage from trove.quota.models import", "Reservation from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from", "= Mock() delta = {'instances': 2, 'volumes': 3} self.driver.reserve(FAKE_TENANT1, resources,", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES,", "OF ANY KIND, either express or implied. See the #", "DbQuotaDriver.py. \"\"\" CONF = cfg.CONF resources = { Resource.INSTANCES: Resource(Resource.INSTANCES,", "import exception from trove.common import cfg from trove.quota.quota import run_with_quotas", "Mock(return_value=FAKE_QUOTAS) QuotaUsage.save = Mock() Reservation.create = Mock() delta = {'instances':", "cfg from trove.quota.quota import run_with_quotas from trove.quota.quota import QUOTAS \"\"\"", "def test_update_resource_instance(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) body =", "self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class", "{'quotas': {'instances': None, 'volumes': 10}} result = self.controller.update(self.req, body, FAKE_TENANT1,", "self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) 
self.assertEquals(Resource.INSTANCES,", "test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota)", "is not enabled') def test_update_resource_volume(self): instance_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2,", "WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express", "in writing, software # distributed under the License is distributed", "and functions in DbQuotaDriver.py. \"\"\" CONF = cfg.CONF resources =", "= [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) usage =", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 5, 'volumes': 3} self.assertRaises(exception.QuotaExceeded,", "{'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def", "self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES)", "def test_rollback(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS =", "defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS", "Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0,", "FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, 
reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "self.mock_quota_result.all = Mock(return_value=[]) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES,", "kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta = {'instances':", "from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import QuotaController from trove.common", "5}, f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f =", "resources = { Resource.INSTANCES: Resource(Resource.INSTANCES, 'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), }", "= Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1,", "self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest, self).setUp() context = mock()", "times=1).save() self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support,", "setUp(self): super(QuotaControllerTest, self).setUp() context = mock() context.is_admin = True req", "test_run_with_quotas_error(self): f = Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes':", "= Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown() Quota.find_all = self.orig_Quota_find_all QuotaUsage.find_all", "= 
[QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0,", "FAKE_TENANT1, resources, delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "= Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource)", "'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\" FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase):", "'Volume support is not enabled') def test_update_resource_volume(self): instance_quota = mock(Quota)", "self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta'])", "Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw =", "= Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self):", "QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES,", "kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes':", "kw['status']) 
_, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED,", "self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save = Mock() QuotaUsage.save", "super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all =", "= self.orig_QuotaUsage_find_by Reservation.create = self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save =", "FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "= [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1,", "delta) def test_reserve_over_quota_but_can_apply_negative_deltas(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0),", "Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\" FAKE_TENANT2 = \"654321\" class", "self.assertTrue(QUOTAS.commit.called) self.assertFalse(QUOTAS.rollback.called) self.assertTrue(f.called) def test_run_with_quotas_error(self): f = Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas,", "self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 1, 'volumes': CONF.max_volumes_per_user +", "trove.quota.models import Reservation from trove.db.models import DatabaseModelBase from trove.extensions.mgmt.quota.service import", "kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, 
kw['status']) def", "= QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create = Reservation.create self.orig_QuotaUsage_create =", "either express or implied. See the # License for the", "self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)", "True req = mock() req.environ = mock() when(req.environ).get(any()).thenReturn(context) self.req =", "kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2,", "Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation from", "= Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(3, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_reserve_resource_unknown(self):", "self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource)", "copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #", "kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2, kw['usage_id']) self.assertEquals(-3,", "resource='instances').thenReturn(quota) body = {'quotas': {'instances': None}} result = self.controller.update(self.req, body,", "may # not use this file except in compliance with", "from trove.quota.models import QuotaUsage from 
trove.quota.models import Reservation from trove.db.models", "in DbQuotaDriver.py. \"\"\" CONF = cfg.CONF resources = { Resource.INSTANCES:", "@testtools.skipIf(not CONF.trove_volume_support, 'Volume support is not enabled') def test_update_resource_volume(self): instance_quota", "# License for the specific language governing permissions and limitations", "with the License. You may obtain # a copy of", "= [Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS)", "QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=5, reserved=2),", "self.assertEqual(1, FAKE_QUOTAS[0].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[0].status) self.assertEqual(1, FAKE_QUOTAS[1].in_use) self.assertEqual(0, FAKE_QUOTAS[1].reserved) self.assertEqual(Reservation.Statuses.ROLLEDBACK, FAKE_RESERVATIONS[1].status)", "f = Mock() run_with_quotas(FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called)", "you may # not use this file except in compliance", "{'instances': 2}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save()", "FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, never).save() self.assertFalse('instances' in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200,", "Mock(side_effect=Exception()) self.assertRaises(Exception, run_with_quotas, FAKE_TENANT1, {'instances': 1, 'volumes': 5}, f) self.assertTrue(QUOTAS.reserve.called)", "= Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource)", "[Reservation(usage_id=1, delta=1, 
status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS)", "Mock(return_value=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2,", "self.orig_Reservation_save def test_get_defaults(self): defaults = self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES])", "self.assertEquals(CONF.max_volumes_per_user, quotas[Resource.VOLUMES].hard_limit) def test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)]", "tenant_id=FAKE_TENANT2, resource='instances').thenReturn(instance_quota) volume_quota = mock(Quota) when(DatabaseModelBase).find_by( tenant_id=FAKE_TENANT2, resource='volumes').thenReturn(volume_quota) body =", "usage.in_use) self.assertEquals(0, usage.reserved) def test_get_all_quota_usages_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=2,", "delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.rollback(FAKE_RESERVATIONS) self.assertEqual(5, FAKE_QUOTAS[0].in_use) self.assertEqual(1, FAKE_QUOTAS[0].reserved)", "defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)]", "result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support is not enabled') def test_update_resource_volume(self):", 
"[Reservation(usage_id=1, delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) self.driver.commit(FAKE_RESERVATIONS)", "self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self):", "{'instances': 1, 'volumes': CONF.max_volumes_per_user + 1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources,", "# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or", "tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=10, reserved=0), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=50, reserved=0)] self.mock_quota_result.all", "self.driver.get_quota_usage_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) self.assertEquals(0, usage.reserved)", "self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support is not", "self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self): self.mock_quota_result.all = Mock(return_value=[]) quota = self.driver.get_quota_by_tenant(FAKE_TENANT1,", "# WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "usages[Resource.VOLUMES].resource) self.assertEquals(0, usages[Resource.VOLUMES].in_use) self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_get_all_quota_usages_by_tenant_with_one_default(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1,", "tearDown(self): super(Run_with_quotasTest, self).tearDown() QUOTAS.reserve = self.quota_reserve_orig 
QUOTAS.rollback = self.quota_rollback_orig QUOTAS.commit", "FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES,", "the License is distributed on an \"AS IS\" BASIS, WITHOUT", "self.orig_Reservation_create QuotaUsage.create = self.orig_QuotaUsage_create QuotaUsage.save = self.orig_QuotaUsage_save Reservation.save = self.orig_Reservation_save", "self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource)", "Mock() QUOTAS.rollback = Mock() QUOTAS.commit = Mock() def tearDown(self): super(Run_with_quotasTest,", "'max_instances_per_user'), Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\" FAKE_TENANT2 =", "= QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock() self.mock_usage_result =", "self).tearDown() unstub() def test_update_unknown_resource(self): body = {'quotas': {'unknown_resource': 5}} self.assertRaises(exception.QuotaResourceUnknown,", "setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all", "self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)", "trove.common import exception from trove.common import cfg from trove.quota.quota import", "def test_get_all_quota_usages_by_tenant_with_one_default(self): 
FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0)] NEW_FAKE_QUOTA =", "QUOTAS.commit QUOTAS.reserve = Mock() QUOTAS.rollback = Mock() QUOTAS.commit = Mock()", "= {'instances': -1, 'volumes': -3} self.driver.reserve(FAKE_TENANT1, resources, delta) _, kw", "a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 #", "5}} self.assertRaises(exception.QuotaResourceUnknown, self.controller.update, self.req, body, FAKE_TENANT1, FAKE_TENANT2) def test_update_resource_no_value(self): quota", "1} self.assertRaises(exception.QuotaExceeded, self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS =", "} FAKE_TENANT1 = \"123456\" FAKE_TENANT2 = \"654321\" class Run_with_quotasTest(testtools.TestCase): def", "{'instances': 10, 'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown, self.driver.reserve, FAKE_TENANT1, resources,", "{'instances': None, 'volumes': 10}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2)", "import QuotaController from trove.common import exception from trove.common import cfg", "self.driver.get_defaults(resources) self.assertEqual(CONF.max_instances_per_user, defaults[Resource.INSTANCES]) self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1,", "for the specific language governing permissions and limitations # under", "resources.keys()) self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1,", "resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = 
Mock(return_value=FAKE_QUOTAS) QuotaUsage.save", "mock() req.environ = mock() when(req.environ).get(any()).thenReturn(context) self.req = req self.controller =", "= [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1,", "def test_update_resource_no_value(self): quota = mock(Quota) when(DatabaseModelBase).find_by(tenant_id=FAKE_TENANT2, resource='instances').thenReturn(quota) body = {'quotas':", "Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS", "except in compliance with the License. You may obtain #", "quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES,", "Resource.VOLUMES: Resource(Resource.VOLUMES, 'max_volumes_per_user'), } FAKE_TENANT1 = \"123456\" FAKE_TENANT2 = \"654321\"", "FAKE_TENANT1, FAKE_TENANT2) verify(quota, never).save() self.assertEquals(200, result.status) def test_update_resource_instance(self): instance_quota =", "def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all", "QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all =", "in result._data['quotas']) verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase):", "Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, usage.tenant_id) self.assertEquals(Resource.VOLUMES, usage.resource) self.assertEquals(0, usage.in_use) 
self.assertEquals(0, usage.reserved) def", "self.mock_usage_result = Mock() Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def", "FAKE_TENANT1, FAKE_TENANT2) verify(instance_quota, times=1).save() self.assertTrue('instances' in result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2,", "self.driver = DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by", "resources, delta) def test_reserve_over_quota_with_usage(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1,", "def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)]", "[Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys())", "language governing permissions and limitations # under the License. import", "self.assertEquals(0, usages[Resource.VOLUMES].reserved) def test_reserve(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=1,", "Reservation.create.call_args_list[0] self.assertEquals(1, kw['usage_id']) self.assertEquals(2, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw =", "License. 
You may obtain # a copy of the License", "self.assertEqual(CONF.max_volumes_per_user, defaults[Resource.VOLUMES]) def test_get_quota_by_tenant(self): FAKE_QUOTAS = [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=12)] self.mock_quota_result.all", "import DbQuotaDriver from trove.quota.models import Resource from trove.quota.models import Quota", "QuotaUsage.save = Mock() Reservation.create = Mock() delta = {'instances': 2,", "self.assertEqual(Reservation.Statuses.COMMITTED, FAKE_RESERVATIONS[1].status) def test_rollback(self): Reservation.save = Mock() QuotaUsage.save = Mock()", "reserved=0) self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(return_value=FAKE_QUOTA) usage = self.driver.get_quota_usage_by_tenant(FAKE_TENANT1,", "test_rollback(self): Reservation.save = Mock() QuotaUsage.save = Mock() FAKE_QUOTAS = [QuotaUsage(id=1,", "hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)", "self.driver.get_quota_by_tenant(FAKE_TENANT1, Resource.VOLUMES) self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.INSTANCES, quota.resource) self.assertEquals(12, quota.hard_limit) def test_get_quota_by_tenant_default(self):", "self.assertEquals(FAKE_TENANT1, quota.tenant_id) self.assertEquals(Resource.VOLUMES, quota.resource) self.assertEquals(CONF.max_volumes_per_user, quota.hard_limit) def test_get_all_quotas_by_tenant(self): FAKE_QUOTAS =", "ANY KIND, either express or implied. 
See the # License", "# distributed under the License is distributed on an \"AS", "def setUp(self): super(QuotaControllerTest, self).setUp() context = mock() context.is_admin = True", "reserved=0) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) QuotaUsage.create = Mock(return_value=NEW_FAKE_QUOTA) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,", "# Unless required by applicable law or agreed to in", "DbQuotaDriver(resources) self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by", "def tearDown(self): super(QuotaControllerTest, self).tearDown() unstub() def test_update_unknown_resource(self): body = {'quotas':", "= [Quota(tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, hard_limit=22), Quota(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, hard_limit=15)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS)", "self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self): super(DbQuotaDriverTest, self).setUp() self.driver =", "verify(volume_quota, times=1).save() self.assertEquals(200, result.status) self.assertEquals(10, result._data['quotas']['volumes']) class DbQuotaDriverTest(testtools.TestCase): def setUp(self):", "resource=Resource.INSTANCES, in_use=1, reserved=2), QuotaUsage(id=2, tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=1, reserved=1)] self.mock_quota_result.all =", "test_reserve_resource_unknown(self): delta = {'instances': 10, 'volumes': 2000, 'Fake_resource': 123} self.assertRaises(exception.QuotaResourceUnknown,", "quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1, quotas[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, quotas[Resource.VOLUMES].resource) 
self.assertEquals(CONF.max_volumes_per_user,", "import cfg from trove.quota.quota import run_with_quotas from trove.quota.quota import QUOTAS", "test_get_quota_usage_by_tenant(self): FAKE_QUOTAS = [QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=3, reserved=1)] self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS)", "= Mock() Reservation.create = Mock() delta = {'instances': -1, 'volumes':", "Quota.find_all = Mock(return_value=self.mock_quota_result) QuotaUsage.find_all = Mock(return_value=self.mock_usage_result) def tearDown(self): super(DbQuotaDriverTest, self).tearDown()", "is distributed on an \"AS IS\" BASIS, WITHOUT # WARRANTIES", "resource='instances').thenReturn(instance_quota) body = {'quotas': {'instances': 2}} result = self.controller.update(self.req, body,", "def test_reserve_over_quota(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1, resource=Resource.INSTANCES, in_use=0, reserved=0), QuotaUsage(id=2,", "QuotaUsage.save self.orig_Reservation_save = Reservation.save self.mock_quota_result = Mock() self.mock_usage_result = Mock()", "Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta = {'instances': 4, 'volumes': 2}", "result._data['quotas']) self.assertEquals(200, result.status) self.assertEquals(2, result._data['quotas']['instances']) @testtools.skipIf(not CONF.trove_volume_support, 'Volume support is", "reserved=0)] self.mock_usage_result.all = Mock(return_value=[]) QuotaUsage.create = Mock(side_effect=FAKE_QUOTAS) usages = self.driver.get_all_quota_usages_by_tenant(FAKE_TENANT1,", "kw['usage_id']) self.assertEquals(-1, kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) _, kw = Reservation.create.call_args_list[1] self.assertEquals(2,", "test_get_quota_usage_by_tenant_default(self): FAKE_QUOTA = QuotaUsage(tenant_id=FAKE_TENANT1, resource=Resource.VOLUMES, in_use=0, reserved=0) self.mock_usage_result.all = Mock(return_value=[])", "self.req = req 
self.controller = QuotaController() def tearDown(self): super(QuotaControllerTest, self).tearDown()", "= {'quotas': {'instances': None}} result = self.controller.update(self.req, body, FAKE_TENANT1, FAKE_TENANT2)", "self.orig_Quota_find_all = Quota.find_all self.orig_QuotaUsage_find_all = QuotaUsage.find_all self.orig_QuotaUsage_find_by = QuotaUsage.find_by self.orig_Reservation_create", "import Quota from trove.quota.models import QuotaUsage from trove.quota.models import Reservation", "usages[Resource.INSTANCES].resource) self.assertEquals(0, usages[Resource.INSTANCES].in_use) self.assertEquals(0, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id) self.assertEquals(Resource.VOLUMES, usages[Resource.VOLUMES].resource) self.assertEquals(0,", "hard_limit=22)] self.mock_quota_result.all = Mock(return_value=FAKE_QUOTAS) quotas = self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id)", "QUOTAS.commit = self.quota_commit_orig def test_run_with_quotas(self): f = Mock() run_with_quotas(FAKE_TENANT1, {'instances':", "self.assertEquals(FAKE_TENANT1, usages[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, usages[Resource.INSTANCES].resource) self.assertEquals(2, usages[Resource.INSTANCES].in_use) self.assertEquals(1, usages[Resource.INSTANCES].reserved) self.assertEquals(FAKE_TENANT1, usages[Resource.VOLUMES].tenant_id)", "self.driver.reserve, FAKE_TENANT1, resources, delta) def test_reserve_over_quota_with_reserved(self): FAKE_QUOTAS = [QuotaUsage(id=1, tenant_id=FAKE_TENANT1,", "kw['delta']) self.assertEquals(Reservation.Statuses.RESERVED, kw['status']) def test_commit(self): Reservation.save = Mock() QuotaUsage.save =", "delta=1, status=Reservation.Statuses.RESERVED), Reservation(usage_id=2, delta=2, status=Reservation.Statuses.RESERVED)] QuotaUsage.find_by = Mock(side_effect=FAKE_QUOTAS) 
self.driver.commit(FAKE_RESERVATIONS) self.assertEqual(6,", "f) self.assertTrue(QUOTAS.reserve.called) self.assertTrue(QUOTAS.rollback.called) self.assertFalse(QUOTAS.commit.called) self.assertTrue(f.called) class QuotaControllerTest(testtools.TestCase): def setUp(self): super(QuotaControllerTest,", "= self.driver.get_all_quotas_by_tenant(FAKE_TENANT1, resources.keys()) self.assertEquals(FAKE_TENANT1, quotas[Resource.INSTANCES].tenant_id) self.assertEquals(Resource.INSTANCES, quotas[Resource.INSTANCES].resource) self.assertEquals(22, quotas[Resource.INSTANCES].hard_limit) self.assertEquals(FAKE_TENANT1,", "import Resource from trove.quota.models import Quota from trove.quota.models import QuotaUsage", "in_use=0, reserved=0)] self.mock_quota_result.all = Mock(return_value=[]) self.mock_usage_result.all = Mock(return_value=FAKE_QUOTAS) delta =", "body = {'quotas': {'instances': None, 'volumes': 10}} result = self.controller.update(self.req," ]
[ "flask_restful import reqparse def retornar_parser(): parser = reqparse.RequestParser() parser.add_argument('sentenca', type=str,", "from flask_restful import reqparse def retornar_parser(): parser = reqparse.RequestParser() parser.add_argument('sentenca',", "import reqparse def retornar_parser(): parser = reqparse.RequestParser() parser.add_argument('sentenca', type=str, required=True)", "def retornar_parser(): parser = reqparse.RequestParser() parser.add_argument('sentenca', type=str, required=True) return parser", "reqparse def retornar_parser(): parser = reqparse.RequestParser() parser.add_argument('sentenca', type=str, required=True) return" ]
[ "xx2 - xx1) h = np.maximum(0.0, yy2 - yy1) hbb_inter", "total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size): detections", "(1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512), # (1024,", "axis=1) y1 = np.min(obbs[:, 1::2], axis=1) x2 = np.max(obbs[:, 0::2],", "import numpy as np from tqdm import tqdm import DOTA_devkit.polyiou", "self.classnames = self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self,", "# TODO: check the corner case # import pdb; pdb.set_trace()", "detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__ == '__main__':", "channel)) # print('i: ', i, 'j: ', j) chip =", "for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target,", "= [] i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]])", "wn, channel)) # print('i: ', i, 'j: ', j) chip", "= np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0,", "yy1) hbb_inter = w * h hbb_ovr = hbb_inter /", "for i in tqdm(range(int(width / slide_w + 1))): for j", "CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기',", "get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def", "except: pass inds = np.where(hbb_ovr <= thresh)[0] order = order[inds", "img) if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth')", "= mmcv.imread(imagname) height, width, channel = img.shape slide_h, slide_w =", "= np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 -", "from mmdet.datasets import get_dataset import cv2 import os import numpy", "= 
target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024,", "DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target", "i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep]", "np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace() # nms for i", "y1 = np.min(obbs[:, 1::2], axis=1) x2 = np.max(obbs[:, 0::2], axis=1)", "'버스', '트럭', '기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형", "slide_w + 1))): for j in range(int(height / slide_h) +", "= np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i],", "CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military airplane',", "range(len(self.classnames))] for i in tqdm(range(int(width / slide_w + 1))): for", "chip_detections[cls_id])) except: import pdb; pdb.set_trace() # nms for i in", "keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return total_detections def", "cv2 import os import numpy as np from tqdm import", "total_detections[i] = total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath, dstpath, slide_size,", "= chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h # import", "# import pdb; pdb.set_trace() total_detections = [np.zeros((0, 9)) for _", "xx1) h = np.maximum(0.0, yy2 - yy1) hbb_inter = w", "# (1024, 1024)) for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg'", "np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1) h =", "return total_detections def inference_single_vis(self, srcpath, dstpath, slide_size, 
chip_size): detections =", "- hbb_inter) h_inds = np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds", "= chip_detections[cls_id][:, :8][:, ::2] + i * slide_w chip_detections[cls_id][:, :8][:,", "not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames] img", "> 0)[0] tmp_order = order[h_inds + 1] for j in", "= ('small ship', 'large ship', 'civil airplane', 'military airplane', 'small", "pdb; pdb.set_trace() # nms for i in range(len(self.classnames)): keep =", "polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]): pdb.set_trace() except: pass", "/ slide_w + 1))): for j in range(int(height / slide_h)", "for j in range(int(height / slide_h) + 1): subimg =", "# nms for i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1)", "(1024, 1024)) for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn',", "order[inds + 1] return keep class DetectorModel(): def __init__(self, config_file,", "'민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인',", "#target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out),", "'roundabout') CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)}", "* h hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] -", "'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP =", "yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1)", ":8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h", "ship', 'large ship', 'civil airplane', 'military airplane', 'small car', 'bus',", "slide_h) + 1): subimg = np.zeros((hn, wn, channel)) # print('i:", "order.size > 0: ovr = [] i = order[0] keep.append(i)", "+ 1] for j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]])", "- x1 + 1) * (y2 - y1 + 1)", "= [np.zeros((0, 9)) for _ in range(len(self.classnames))] for i in", "for i 
in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3],", "교차로') CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane', 'military", "np.where(hbb_ovr <= thresh)[0] order = order[inds + 1] return keep", "+ 1))): for j in range(int(height / slide_h) + 1):", "= polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon)", "if cls not in CLASS_MAP else CLASS_MAP[cls] for cls in", "'헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small ship', 'large ship', 'civil", "chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h # import pdb;pdb.set_trace()", "k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs =", "<= thresh)[0] order = order[inds + 1] return keep class", "선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스',", "channel = img.shape slide_h, slide_w = slide_size hn, wn =", "range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return total_detections", "= self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model =", "total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))] for i", "= np.maximum(0.0, yy2 - yy1) hbb_inter = w * h", "DOTA_devkit.polyiou as polyiou import math import pdb CLASS_NAMES_KR = ('소형", "xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w =", "{k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh):", "/ slide_h) + 1): subimg = np.zeros((hn, wn, channel)) #", "import pdb; pdb.set_trace() total_detections = [np.zeros((0, 9)) for _ in", "# os.path.join('demo', out), # (512, 512), # (1024, 1024)) for", "#out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512),", "np.minimum(x2[i], 
x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2", "chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0],", "CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames] img = draw_poly_detections(srcpath,", "glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo',", "subimg = np.zeros((hn, wn, channel)) # print('i: ', i, 'j:", "try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace() #", "while order.size > 0: ovr = [] i = order[0]", "/ (areas[i] + areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr >", "as np from tqdm import tqdm import DOTA_devkit.polyiou as polyiou", "= dets[:, 0:-1] x1 = np.min(obbs[:, 0::2], axis=1) y1 =", "inference_detector, show_result, draw_poly_detections import mmcv from mmcv import Config from", "= w * h hbb_ovr = hbb_inter / (areas[i] +", "total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace() # nms", "for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:,", "', result) for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2]", "np.zeros((hn, wn, channel)) # print('i: ', i, 'j: ', j)", "+ 1) * (y2 - y1 + 1) polys =", "obbs = dets[:, 0:-1] x1 = np.min(obbs[:, 0::2], axis=1) y1", "check the corner case # import pdb; pdb.set_trace() total_detections =", "import pdb CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기',", "선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차',", "print('i: ', i, 'j: ', j) chip = img[j*slide_h:j*slide_h +", "'다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small", "import glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out =", "'crane', 'bridge', 
'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v", "thresh): obbs = dets[:, 0:-1] x1 = np.min(obbs[:, 0::2], axis=1)", "RoITransformer self.config_file = config_file self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file)", "i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections", "range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6],", "srcpath, dstpath, slide_size, chip_size): detections = self.inference_single(srcpath, slide_size, chip_size) classnames", "checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size, chip_size): img = mmcv.imread(imagname)", "'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train', 'crane',", "(x2 - x1 + 1) * (y2 - y1 +", ":8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w", "self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size,", "axis=1) scores = dets[:, 8] areas = (x2 - x1", "[cls if cls not in CLASS_MAP else CLASS_MAP[cls] for cls", "range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if", "from mmcv import Config from mmdet.datasets import get_dataset import cv2", "as polyiou import math import pdb CLASS_NAMES_KR = ('소형 선박',", "yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 =", "print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg',", "import cv2 import os import numpy as np from tqdm", "checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test)", "np.min(obbs[:, 1::2], axis=1) x2 = 
np.max(obbs[:, 0::2], axis=1) y2 =", "('small ship', 'large ship', 'civil airplane', 'military airplane', 'small car',", "항공기', '소형 승용차', '버스', '트럭', '기차', '크레인', '다리', '정유탱크', '댐',", "- xx1) h = np.maximum(0.0, yy2 - yy1) hbb_inter =", "hn, wn = chip_size # TODO: check the corner case", "CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1] x1 =", "# (512, 512), # (1024, 1024)) for target in roksis[:100]:", "y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w", "+ 1] return keep class DetectorModel(): def __init__(self, config_file, checkpoint_file):", "class DetectorModel(): def __init__(self, config_file, checkpoint_file): # init RoITransformer self.config_file", "get_dataset import cv2 import os import numpy as np from", "self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file,", "out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', #", "inference_detector(self.model, subimg) # print('result: ', result) for cls_id, name in", "= np.min(obbs[:, 1::2], axis=1) x2 = np.max(obbs[:, 0::2], axis=1) y2", "h_inds = np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds + 1]", "config_file, checkpoint_file): # init RoITransformer self.config_file = config_file self.checkpoint_file =", "', j) chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn,", "classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__ == '__main__': #roitransformer", "hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]): pdb.set_trace() except: pass inds", ":] = chip chip_detections = inference_detector(self.model, subimg) # print('result: ',", "mmcv import Config from mmdet.datasets import get_dataset import cv2 import", "if math.isnan(ovr[0]): pdb.set_trace() except: pass inds = np.where(hbb_ovr <= thresh)[0]", 
"self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model", "wn = chip_size # TODO: check the corner case #", "* slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id]))", "hbb_inter = w * h hbb_ovr = hbb_inter / (areas[i]", "(areas[i] + areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr > 0)[0]", "roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512,", "= order[inds + 1] return keep class DetectorModel(): def __init__(self,", "1): subimg = np.zeros((hn, wn, channel)) # print('i: ', i,", "1) * (y2 - y1 + 1) polys = []", "wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections = inference_detector(self.model,", "'원형 교차로') CLASS_NAMES_EN = ('small ship', 'large ship', 'civil airplane',", "Config from mmdet.datasets import get_dataset import cv2 import os import", "y2 = np.max(obbs[:, 1::2], axis=1) scores = dets[:, 8] areas", "# r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob", ":chip.shape[1], :] = chip chip_detections = inference_detector(self.model, subimg) # print('result:", "'기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN", "1] for j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]]", "dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep = []", "_ in range(len(self.classnames))] for i in tqdm(range(int(width / slide_w +", "DetectorModel(): def __init__(self, config_file, checkpoint_file): # init RoITransformer self.config_file =", "[] for i 
in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2],", "target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn',", "mmcv from mmcv import Config from mmdet.datasets import get_dataset import", "axis=1) x2 = np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:, 1::2],", "(y2 - y1 + 1) polys = [] for i", "= [] for i in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1],", "self.config_file = config_file self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test", "slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j", "= (x2 - x1 + 1) * (y2 - y1", "+ 1) polys = [] for i in range(len(dets)): tm_polygon", "slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except:", "'__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', #", "= target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512), #", "= inference_detector(self.model, subimg) # print('result: ', result) for cls_id, name", "w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2", "np.maximum(0.0, yy2 - yy1) hbb_inter = w * h hbb_ovr", "ovr = [] i = order[0] keep.append(i) xx1 = np.maximum(x1[i],", "'소형 승용차', '버스', '트럭', '기차', '크레인', '다리', '정유탱크', '댐', '운동경기장',", "detections = self.inference_single(srcpath, slide_size, chip_size) classnames = [cls if cls", "[] i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1", "cls_id, name in enumerate(self.classnames): 
chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:,", "import Config from mmdet.datasets import get_dataset import cv2 import os", "in range(int(height / slide_h) + 1): subimg = np.zeros((hn, wn,", "import pdb; pdb.set_trace() # nms for i in range(len(self.classnames)): keep", "'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium',", "chip_detections[cls_id][:, :8][:, ::2] + i * slide_w chip_detections[cls_id][:, :8][:, 1::2]", "CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def", "thresh)[0] order = order[inds + 1] return keep class DetectorModel():", "'military airplane', 'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank',", "mmcv.imread(imagname) height, width, channel = img.shape slide_h, slide_w = slide_size", "'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for k, v in", "= get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0')", "in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try:", "'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for k, v", "roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target,", "', i, 'j: ', j) chip = img[j*slide_h:j*slide_h + hn,", "x2 = np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:, 1::2], axis=1)", "in range(len(self.classnames))] for i in tqdm(range(int(width / slide_w + 1))):", "nms for i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i]", "pdb.set_trace() total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))] for", "pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace()", "chip_size): detections = 
self.inference_single(srcpath, slide_size, chip_size) classnames = [cls if", "areas = (x2 - x1 + 1) * (y2 -", "'정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small ship',", "= np.max(obbs[:, 1::2], axis=1) scores = dets[:, 8] areas =", "self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES self.model = init_detector(config_file,", "draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__ ==", "iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]):", "pdb; pdb.set_trace() total_detections = [np.zeros((0, 9)) for _ in range(len(self.classnames))]", "dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order =", "0)[0] tmp_order = order[h_inds + 1] for j in range(tmp_order.size):", "def __init__(self, config_file, checkpoint_file): # init RoITransformer self.config_file = config_file", "= self.inference_single(srcpath, slide_size, chip_size) classnames = [cls if cls not", "dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep =", "= checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset =", "dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1]", "out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512),", "항공기', '군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인', '다리',", "show_result, draw_poly_detections import mmcv from mmcv import Config from mmdet.datasets", "car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad',", "'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for k,", "8] 
areas = (x2 - x1 + 1) * (y2", "= roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), #", "zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1] x1", "case # import pdb; pdb.set_trace() total_detections = [np.zeros((0, 9)) for", "1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512), # (1024, 1024))", "r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis", "= config_file self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test =", "dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep = [] while", "= {k:v for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets,", "range(int(height / slide_h) + 1): subimg = np.zeros((hn, wn, channel))", "keep class DetectorModel(): def __init__(self, config_file, checkpoint_file): # init RoITransformer", "x1 = np.min(obbs[:, 0::2], axis=1) y1 = np.min(obbs[:, 1::2], axis=1)", "# print('result: ', result) for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:,", "import tqdm import DOTA_devkit.polyiou as polyiou import math import pdb", "= ('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형", "#roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth')", "= img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1],", "0:-1] x1 = np.min(obbs[:, 0::2], axis=1) y1 = 
np.min(obbs[:, 1::2],", "name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2]", "i = order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 =", "np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]])", "except: import pdb; pdb.set_trace() # nms for i in range(len(self.classnames)):", "mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections import mmcv from mmcv", "mmdet.datasets import get_dataset import cv2 import os import numpy as", "hbb_inter) h_inds = np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds +", "py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1] x1 = np.min(obbs[:, 0::2],", "dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep", "chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i *", "classnames = [cls if cls not in CLASS_MAP else CLASS_MAP[cls]", "= glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, #", "j) chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3]", "j in range(int(height / slide_h) + 1): subimg = np.zeros((hn,", "#roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from", "hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter) h_inds", "subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections = inference_detector(self.model, subimg) #", "> 0: ovr = [] i = order[0] keep.append(i) xx1", "r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = 
DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth')", "+ hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] =", "== '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py',", "= np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:, 1::2], axis=1) scores", "CLASS_MAP[cls] for cls in self.classnames] img = draw_poly_detections(srcpath, detections, classnames,", "1::2] + j * slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id]", "'helipad', 'roundabout') CLASS_MAP = {k:v for k, v in zip(CLASS_NAMES_KR,", "scores.argsort()[::-1] keep = [] while order.size > 0: ovr =", "height, width, channel = img.shape slide_h, slide_w = slide_size hn,", "+ 1): subimg = np.zeros((hn, wn, channel)) # print('i: ',", "checkpoint_file): # init RoITransformer self.config_file = config_file self.checkpoint_file = checkpoint_file", "= total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size):", "j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou", "np.min(obbs[:, 0::2], axis=1) y1 = np.min(obbs[:, 1::2], axis=1) x2 =", "dstpath, slide_size, chip_size): detections = self.inference_single(srcpath, slide_size, chip_size) classnames =", "airplane', 'military airplane', 'small car', 'bus', 'truck', 'train', 'crane', 'bridge',", "= np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2 = np.minimum(y2[i],", "= 
DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer", "numpy as np from tqdm import tqdm import DOTA_devkit.polyiou as", "= [cls if cls not in CLASS_MAP else CLASS_MAP[cls] for", "('소형 선박', '대형 선박', '민간 항공기', '군용 항공기', '소형 승용차',", "x1 + 1) * (y2 - y1 + 1) polys", "i in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4],", "= iou try: if math.isnan(ovr[0]): pdb.set_trace() except: pass inds =", "keep = [] while order.size > 0: ovr = []", "chip_detections = inference_detector(self.model, subimg) # print('result: ', result) for cls_id,", "pass inds = np.where(hbb_ovr <= thresh)[0] order = order[inds +", ":8][:, 1::2] + j * slide_h # import pdb;pdb.set_trace() try:", "for i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] =", "init_detector, inference_detector, show_result, draw_poly_detections import mmcv from mmcv import Config", "1::2], axis=1) x2 = np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:,", "in self.classnames] img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath,", ":8][:, ::2] + i * slide_w chip_detections[cls_id][:, :8][:, 1::2] =", "= dets[:, 8] areas = (x2 - x1 + 1)", "= order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i],", "from tqdm import tqdm import DOTA_devkit.polyiou as polyiou import math", "#roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512), # (1024, 1024))", "py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath,", "print('result: ', 
result) for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:,", "subimg) # print('result: ', result) for cls_id, name in enumerate(self.classnames):", "self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames", "np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0, yy2 - yy1)", "hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr", "in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5],", "512), # (1024, 1024)) for target in roksis[:100]: out =", "for k, v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs", "j * slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id],", "roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis =", "if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer", "draw_poly_detections import mmcv from mmcv import Config from mmdet.datasets import", "else CLASS_MAP[cls] for cls in self.classnames] img = draw_poly_detections(srcpath, detections,", "airplane', 'small car', 'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam',", "img = mmcv.imread(imagname) height, width, channel = img.shape slide_h, slide_w", "= np.min(obbs[:, 0::2], axis=1) y1 = np.min(obbs[:, 1::2], axis=1) x2", "slide_w = slide_size hn, wn = chip_size # TODO: check", "i in tqdm(range(int(width / slide_w + 1))): for j in", "1) polys = [] for i in range(len(dets)): tm_polygon =", "1))): for j in range(int(height / slide_h) + 1): subimg", "order[0] keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 
= np.maximum(y1[i], y1[order[1:]])", "iou try: if math.isnan(ovr[0]): pdb.set_trace() except: pass inds = np.where(hbb_ovr", "= py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return total_detections def inference_single_vis(self,", "polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]): pdb.set_trace() except:", "inds = np.where(hbb_ovr <= thresh)[0] order = order[inds + 1]", "axis=1) y2 = np.max(obbs[:, 1::2], axis=1) scores = dets[:, 8]", "w * h hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]]", "math.isnan(ovr[0]): pdb.set_trace() except: pass inds = np.where(hbb_ovr <= thresh)[0] order", "0::2], axis=1) y2 = np.max(obbs[:, 1::2], axis=1) scores = dets[:,", "+ areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr > 0)[0] tmp_order", "pdb CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간 항공기', '군용", "hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] = chip", "np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds + 1] for j", "512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512), #", "::2] = chip_detections[cls_id][:, :8][:, ::2] + i * slide_w chip_detections[cls_id][:,", "y1 + 1) polys = [] for i in range(len(dets)):", "in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out),", "glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out", "= polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] = iou try: if math.isnan(ovr[0]): pdb.set_trace()", "<reponame>gunlyungyou/AerialDetection<filename>demo_large_image.py<gh_stars>1-10 from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections import mmcv", "imagname, slide_size, chip_size): img = mmcv.imread(imagname) height, width, channel =", "= 
[] while order.size > 0: ovr = [] i", "chip chip_detections = inference_detector(self.model, subimg) # print('result: ', result) for", "def inference_single(self, imagname, slide_size, chip_size): img = mmcv.imread(imagname) height, width,", "np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]])", "DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import", "from glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1]", "chip_size): img = mmcv.imread(imagname) height, width, channel = img.shape slide_h,", "dets[i][7]]) polys.append(tm_polygon) order = scores.argsort()[::-1] keep = [] while order.size", "= DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob", "import DOTA_devkit.polyiou as polyiou import math import pdb CLASS_NAMES_KR =", "h hbb_ovr = hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter)", "img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w + wn, :3] subimg[:chip.shape[0], :chip.shape[1], :]", "'크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN =", "target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out)) roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024))", "= np.where(hbb_ovr > 0)[0] tmp_order = order[h_inds + 1] for", "init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size, chip_size): img 
=", "chip_size) classnames = [cls if cls not in CLASS_MAP else", "scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__ == '__main__': #roitransformer =", "'j: ', j) chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w +", "import get_dataset import cv2 import os import numpy as np", "= hbb_inter / (areas[i] + areas[order[1:]] - hbb_inter) h_inds =", "Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames = self.dataset.CLASSES", "slide_size, chip_size): img = mmcv.imread(imagname) height, width, channel = img.shape", "target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512, 512), # (1024,", "tm_polygon = polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], dets[i][7]])", "in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i], 0.1) total_detections[i] = total_detections[i][keep] return", "result) for cls_id, name in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] =", "cls not in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames]", "return keep class DetectorModel(): def __init__(self, config_file, checkpoint_file): # init", "enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] + i", "yy2 - yy1) hbb_inter = w * h hbb_ovr =", "[] while order.size > 0: ovr = [] i =", "chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j *", "img.shape slide_h, slide_w = slide_size hn, wn = chip_size #", "= np.where(hbb_ovr <= thresh)[0] order = order[inds + 1] return", "import os import numpy as np from tqdm import tqdm", "= np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1) h", "in enumerate(self.classnames): chip_detections[cls_id][:, :8][:, ::2] = chip_detections[cls_id][:, :8][:, ::2] +", "self.inference_single(srcpath, 
slide_size, chip_size) classnames = [cls if cls not in", "x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 = np.minimum(x2[i], x2[order[1:]]) yy2", "r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis = glob('data/roksi2020/val/images/*.png') #target =", "os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', #", "= chip chip_detections = inference_detector(self.model, subimg) # print('result: ', result)", "pdb.set_trace() except: pass inds = np.where(hbb_ovr <= thresh)[0] order =", "'운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small ship', 'large ship',", "init RoITransformer self.config_file = config_file self.checkpoint_file = checkpoint_file self.cfg =", "tqdm import tqdm import DOTA_devkit.polyiou as polyiou import math import", "self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset", "= scores.argsort()[::-1] keep = [] while order.size > 0: ovr", "'댐', '운동경기장', '헬리패드', '원형 교차로') CLASS_NAMES_EN = ('small ship', 'large", "config_file self.checkpoint_file = checkpoint_file self.cfg = Config.fromfile(self.config_file) self.data_test = self.cfg.data['test']", "(512, 512), # (1024, 1024)) for target in roksis[:100]: out", "'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP = {k:v for", "def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size): detections = self.inference_single(srcpath, slide_size,", "= img.shape slide_h, slide_w = slide_size hn, wn = chip_size", "np from tqdm import tqdm import DOTA_devkit.polyiou as polyiou import", "scores = dets[:, 8] areas = (x2 - x1 +", "import mmcv from mmcv import Config from mmdet.datasets import get_dataset", "total_detections def inference_single_vis(self, srcpath, dstpath, slide_size, chip_size): detections = self.inference_single(srcpath,", "승용차', '버스', '트럭', 
'기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드',", "# init RoITransformer self.config_file = config_file self.checkpoint_file = checkpoint_file self.cfg", "= slide_size hn, wn = chip_size # TODO: check the", "try: if math.isnan(ovr[0]): pdb.set_trace() except: pass inds = np.where(hbb_ovr <=", "roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg' #roitransformer.inference_single_vis(target, # os.path.join('demo', out), # (512,", "for j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i], polys[tmp_order[j]]) hbb_ovr[h_inds[j]] =", "width, channel = img.shape slide_h, slide_w = slide_size hn, wn", "order = order[inds + 1] return keep class DetectorModel(): def", "v in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:,", "in zip(CLASS_NAMES_KR, CLASS_NAMES_EN)} def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1]", "- yy1) hbb_inter = w * h hbb_ovr = hbb_inter", "= Config.fromfile(self.config_file) self.data_test = self.cfg.data['test'] self.dataset = get_dataset(self.data_test) self.classnames =", "[np.zeros((0, 9)) for _ in range(len(self.classnames))] for i in tqdm(range(int(width", "self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size, chip_size):", "* (y2 - y1 + 1) polys = [] for", "from mmdet.apis import init_detector, inference_detector, show_result, draw_poly_detections import mmcv from", "img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if", "slide_size, chip_size) classnames = [cls if cls not in CLASS_MAP", "out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512,", "+ i * slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:,", "i * slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2]", "slide_h, slide_w = slide_size hn, wn = chip_size # 
TODO:", "cv2.imwrite(dstpath, img) if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', #", "slide_size, chip_size): detections = self.inference_single(srcpath, slide_size, chip_size) classnames = [cls", "'군용 항공기', '소형 승용차', '버스', '트럭', '기차', '크레인', '다리', '정유탱크',", "x2[order[1:]]) yy2 = np.minimum(y2[i], y2[order[1:]]) w = np.maximum(0.0, xx2 -", "device='cuda:0') def inference_single(self, imagname, slide_size, chip_size): img = mmcv.imread(imagname) height,", "corner case # import pdb; pdb.set_trace() total_detections = [np.zeros((0, 9))", "tqdm(range(int(width / slide_w + 1))): for j in range(int(height /", "DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer =", "'트럭', '기차', '크레인', '다리', '정유탱크', '댐', '운동경기장', '헬리패드', '원형 교차로')", "(512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg', # (512, 512),", "0::2], axis=1) y1 = np.min(obbs[:, 1::2], axis=1) x2 = np.max(obbs[:,", "'대형 선박', '민간 항공기', '군용 항공기', '소형 승용차', '버스', '트럭',", "# import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import", "inference_single_vis(self, srcpath, dstpath, slide_size, chip_size): detections = self.inference_single(srcpath, slide_size, chip_size)", "self.classnames] img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img)", "def py_cpu_nms_poly_fast_np(dets, thresh): obbs = dets[:, 0:-1] x1 = np.min(obbs[:,", "= self.dataset.CLASSES self.model = init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname,", "__init__(self, config_file, 
checkpoint_file): # init RoITransformer self.config_file = config_file self.checkpoint_file", "polys.append(tm_polygon) order = scores.argsort()[::-1] keep = [] while order.size >", "roitransformer.inference_single_vis(target, os.path.join('demo/fasterrcnn', out), (512, 512), (1024, 1024)) #roitransformer.inference_single_vis(r'demo/P0009.jpg', # r'demo/P0009_out.jpg',", "::2] + i * slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:,", "# r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer = DetectorModel(r'configs/roksi2020/retinanet_obb_r50_fpn_2x_roksi2020_mgpu.py', # r'work_dirs/retinanet_obb_r50_fpn_2x_roksi2020_mgpu/epoch_24.pth') roitransformer = DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py',", "dets[:, 8] areas = (x2 - x1 + 1) *", "cls in self.classnames] img = draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3)", "chip_size # TODO: check the corner case # import pdb;", "= chip_size # TODO: check the corner case # import", "threshold=0.3) cv2.imwrite(dstpath, img) if __name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py',", "order[h_inds + 1] for j in range(tmp_order.size): iou = polyiou.iou_poly(polys[i],", "+ j * slide_h # import pdb;pdb.set_trace() try: total_detections[cls_id] =", "inference_single(self, imagname, slide_size, chip_size): img = mmcv.imread(imagname) height, width, channel", ":3] subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections = inference_detector(self.model, subimg)", "+ wn, :3] subimg[:chip.shape[0], :chip.shape[1], :] = chip chip_detections =", "TODO: check the corner case # import pdb; pdb.set_trace() total_detections", "1::2], axis=1) scores = dets[:, 8] areas = (x2 -", "for _ in range(len(self.classnames))] for i in tqdm(range(int(width / slide_w", "import init_detector, inference_detector, show_result, draw_poly_detections import mmcv from mmcv 
import", "os import numpy as np from tqdm import tqdm import", "= draw_poly_detections(srcpath, detections, classnames, scale=1, threshold=0.3) cv2.imwrite(dstpath, img) if __name__", "1024)) for target in roksis[:100]: out = target.split('/')[-1][:-4]+'_out.jpg' print(os.path.join('demo/fasterrcnn', out))", "tqdm import DOTA_devkit.polyiou as polyiou import math import pdb CLASS_NAMES_KR", "slide_size hn, wn = chip_size # TODO: check the corner", "9)) for _ in range(len(self.classnames))] for i in tqdm(range(int(width /", "pdb.set_trace() # nms for i in range(len(self.classnames)): keep = py_cpu_nms_poly_fast_np(total_detections[i],", "dets[:, 0:-1] x1 = np.min(obbs[:, 0::2], axis=1) y1 = np.min(obbs[:,", "polys = [] for i in range(len(dets)): tm_polygon = polyiou.VectorDouble([dets[i][0],", "* slide_w chip_detections[cls_id][:, :8][:, 1::2] = chip_detections[cls_id][:, :8][:, 1::2] +", "out), # (512, 512), # (1024, 1024)) for target in", "order = scores.argsort()[::-1] keep = [] while order.size > 0:", "1::2] = chip_detections[cls_id][:, :8][:, 1::2] + j * slide_h #", "glob roksis = glob('data/roksi2020/val/images/*.png') #target = roksis[1] #out = target.split('/')[-1][:-4]+'_out.jpg'", "import math import pdb CLASS_NAMES_KR = ('소형 선박', '대형 선박',", "the corner case # import pdb; pdb.set_trace() total_detections = [np.zeros((0,", "0: ovr = [] i = order[0] keep.append(i) xx1 =", "'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout') CLASS_MAP", "y2[order[1:]]) w = np.maximum(0.0, xx2 - xx1) h = np.maximum(0.0,", "in tqdm(range(int(width / slide_w + 1))): for j in range(int(height", "in CLASS_MAP else CLASS_MAP[cls] for cls in self.classnames] img =", "os.path.join('demo', out), # (512, 512), # (1024, 1024)) for target", "for cls in self.classnames] img = draw_poly_detections(srcpath, detections, classnames, scale=1,", "polyiou.VectorDouble([dets[i][0], dets[i][1], dets[i][2], dets[i][3], dets[i][4], dets[i][5], dets[i][6], 
dets[i][7]]) polys.append(tm_polygon) order", "math import pdb CLASS_NAMES_KR = ('소형 선박', '대형 선박', '민간", "0.1) total_detections[i] = total_detections[i][keep] return total_detections def inference_single_vis(self, srcpath, dstpath,", "xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2 =", "areas[order[1:]] - hbb_inter) h_inds = np.where(hbb_ovr > 0)[0] tmp_order =", "import pdb;pdb.set_trace() try: total_detections[cls_id] = np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb;", "i, 'j: ', j) chip = img[j*slide_h:j*slide_h + hn, i*slide_w:i*slide_w", "keep.append(i) xx1 = np.maximum(x1[i], x1[order[1:]]) yy1 = np.maximum(y1[i], y1[order[1:]]) xx2", "= order[h_inds + 1] for j in range(tmp_order.size): iou =", "polyiou import math import pdb CLASS_NAMES_KR = ('소형 선박', '대형", "h = np.maximum(0.0, yy2 - yy1) hbb_inter = w *", "ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck', 'train',", "= np.zeros((hn, wn, channel)) # print('i: ', i, 'j: ',", "= DetectorModel(r'configs/roksi2020/faster_rcnn_RoITrans_r50_fpn_2x_roksi.py', r'work_dirs/faster_rcnn_RoITrans_r50_fpn_2x_roksi/epoch_24.pth') from glob import glob roksis = glob('data/roksi2020/val/images/*.png')", "# print('i: ', i, 'j: ', j) chip = img[j*slide_h:j*slide_h", "= init_detector(config_file, checkpoint_file, device='cuda:0') def inference_single(self, imagname, slide_size, chip_size): img", "tmp_order = order[h_inds + 1] for j in range(tmp_order.size): iou", "= np.concatenate((total_detections[cls_id], chip_detections[cls_id])) except: import pdb; pdb.set_trace() # nms for", "'bus', 'truck', 'train', 'crane', 'bridge', 'oiltank', 'dam', 'stadium', 'helipad', 'roundabout')", "np.max(obbs[:, 0::2], axis=1) y2 = np.max(obbs[:, 1::2], axis=1) scores =", "__name__ == '__main__': #roitransformer = DetectorModel(r'configs/DOTA/faster_rcnn_RoITrans_r50_fpn_1x_dota.py', # 
r'work_dirs/faster_rcnn_RoITrans_r50_fpn_1x_dota/epoch_12.pth') #roitransformer =", "- y1 + 1) polys = [] for i in", "1] return keep class DetectorModel(): def __init__(self, config_file, checkpoint_file): #", "'large ship', 'civil airplane', 'military airplane', 'small car', 'bus', 'truck',", "np.max(obbs[:, 1::2], axis=1) scores = dets[:, 8] areas = (x2" ]
[ "import Subject from dicoms.models import Session from dicoms.models import Series", "from django.contrib import admin from dicoms.models import Subject from dicoms.models", "import admin from dicoms.models import Subject from dicoms.models import Session", "admin from dicoms.models import Subject from dicoms.models import Session from", "django.contrib import admin from dicoms.models import Subject from dicoms.models import", "Subject from dicoms.models import Session from dicoms.models import Series admin.site.register(Session)", "from dicoms.models import Session from dicoms.models import Series admin.site.register(Session) admin.site.register(Subject)", "dicoms.models import Subject from dicoms.models import Session from dicoms.models import", "dicoms.models import Session from dicoms.models import Series admin.site.register(Session) admin.site.register(Subject) admin.site.register(Series)", "from dicoms.models import Subject from dicoms.models import Session from dicoms.models" ]
[ "\"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application from", "from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application =", "import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application() application", "django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application()", "config for django-react-redux-jwt-base project. \"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from", "get_wsgi_application from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application() application =", "<filename>src/djangoreactredux/wsgi.py \"\"\" WSGI config for django-react-redux-jwt-base project. \"\"\" import os", "\"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise application", "django-react-redux-jwt-base project. \"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import", "os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application from whitenoise.django import", "os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application from whitenoise.django import DjangoWhiteNoise", "for django-react-redux-jwt-base project. \"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi", "project. 
\"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application", "import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\") from django.core.wsgi import get_wsgi_application from whitenoise.django", "from whitenoise.django import DjangoWhiteNoise application = get_wsgi_application() application = DjangoWhiteNoise(application)", "WSGI config for django-react-redux-jwt-base project. \"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"djangoreactredux.settings.dev\")", "\"\"\" WSGI config for django-react-redux-jwt-base project. \"\"\" import os os.environ.setdefault(\"DJANGO_SETTINGS_MODULE\"," ]
[ "key (if applicable). :param key: The unprefixed key. :return: The", "_default_conf = {} def __init__(self, conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf)", "= {} def __init__(self, conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern", "any configured prefix prepended. \"\"\" pfx = self.key_prefix if self.key_prefix", "import jsonpickle class BaseReader(object): \"\"\" Base class for dynamic readers", "= self.key_prefix if self.key_prefix is not None else '' return", "\"\"\" Base class for dynamic readers \"\"\" _default_conf = {}", "value) def _is_valid_key(self, key): if not self.key_pattern: return True return", ":param key: The unprefixed key. :return: The key with any", "value): if not self._is_valid_key(key): return if self.auto_casting: value = jsonpickle.encode(value)", "key: The unprefixed key. :return: The key with any configured", "<gh_stars>0 # -*- coding: utf-8 -*- import re from copy", "key. :return: The key with any configured prefix prepended. \"\"\"", "result def set(self, key, value): if not self._is_valid_key(key): return if", "copy import deepcopy import jsonpickle class BaseReader(object): \"\"\" Base class", "import deepcopy import jsonpickle class BaseReader(object): \"\"\" Base class for", "deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix =", "= self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def get(self, key): if not", "return bool(re.match(self.key_pattern, key)) def _qualified_key(self, key): \"\"\" Prepends the configured", "prefix to the key (if applicable). :param key: The unprefixed", "jsonpickle class BaseReader(object): \"\"\" Base class for dynamic readers \"\"\"", "The unprefixed key. 
:return: The key with any configured prefix", "\"\"\" Prepends the configured prefix to the key (if applicable).", "set(self, key, value): if not self._is_valid_key(key): return if self.auto_casting: value", "key): if not self._is_valid_key(key): return result = self._get(self._qualified_key(key)) if self.auto_casting", "return if self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self,", "BaseReader(object): \"\"\" Base class for dynamic readers \"\"\" _default_conf =", "-*- coding: utf-8 -*- import re from copy import deepcopy", "unprefixed key. :return: The key with any configured prefix prepended.", "with any configured prefix prepended. \"\"\" pfx = self.key_prefix if", "self.auto_casting = self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def get(self, key): if", "self.key_prefix if self.key_prefix is not None else '' return '{}{}'.format(pfx,", "None): result = jsonpickle.decode(result) return result def set(self, key, value):", "{} def __init__(self, conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern =", "self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def get(self, key): if not self._is_valid_key(key):", "is not None): result = jsonpickle.decode(result) return result def set(self,", "configured prefix to the key (if applicable). 
:param key: The", "utf-8 -*- import re from copy import deepcopy import jsonpickle", "self.conf.get('prefix') def get(self, key): if not self._is_valid_key(key): return result =", "return True return bool(re.match(self.key_pattern, key)) def _qualified_key(self, key): \"\"\" Prepends", "_qualified_key(self, key): \"\"\" Prepends the configured prefix to the key", "(result is not None): result = jsonpickle.decode(result) return result def", "deepcopy import jsonpickle class BaseReader(object): \"\"\" Base class for dynamic", "not self.key_pattern: return True return bool(re.match(self.key_pattern, key)) def _qualified_key(self, key):", "key with any configured prefix prepended. \"\"\" pfx = self.key_prefix", "__init__(self, conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting", "the configured prefix to the key (if applicable). :param key:", "class BaseReader(object): \"\"\" Base class for dynamic readers \"\"\" _default_conf", "self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting')", "True return bool(re.match(self.key_pattern, key)) def _qualified_key(self, key): \"\"\" Prepends the", "-*- import re from copy import deepcopy import jsonpickle class", "from copy import deepcopy import jsonpickle class BaseReader(object): \"\"\" Base", "import re from copy import deepcopy import jsonpickle class BaseReader(object):", "if not self._is_valid_key(key): return result = self._get(self._qualified_key(key)) if self.auto_casting and", "key): \"\"\" Prepends the configured prefix to the key (if", "return result def set(self, key, value): if not self._is_valid_key(key): return", "class for dynamic readers \"\"\" _default_conf = {} def __init__(self,", "self._set(self._qualified_key(key), value) def _is_valid_key(self, key): if not self.key_pattern: return True", "Base class for dynamic readers 
\"\"\" _default_conf = {} def", "not None): result = jsonpickle.decode(result) return result def set(self, key,", "key, value): if not self._is_valid_key(key): return if self.auto_casting: value =", "for dynamic readers \"\"\" _default_conf = {} def __init__(self, conf):", "= deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix", "re from copy import deepcopy import jsonpickle class BaseReader(object): \"\"\"", "The key with any configured prefix prepended. \"\"\" pfx =", "self._get(self._qualified_key(key)) if self.auto_casting and (result is not None): result =", "self.key_pattern = self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def", "the key (if applicable). :param key: The unprefixed key. :return:", "if self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self, key):", "result = jsonpickle.decode(result) return result def set(self, key, value): if", "self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix')", "jsonpickle.decode(result) return result def set(self, key, value): if not self._is_valid_key(key):", "pfx = self.key_prefix if self.key_prefix is not None else ''", "# -*- coding: utf-8 -*- import re from copy import", "self.key_pattern: return True return bool(re.match(self.key_pattern, key)) def _qualified_key(self, key): \"\"\"", "(if applicable). :param key: The unprefixed key. 
:return: The key", "self.key_prefix = self.conf.get('prefix') def get(self, key): if not self._is_valid_key(key): return", "if not self._is_valid_key(key): return if self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key),", "dynamic readers \"\"\" _default_conf = {} def __init__(self, conf): self.conf", "result = self._get(self._qualified_key(key)) if self.auto_casting and (result is not None):", "return result = self._get(self._qualified_key(key)) if self.auto_casting and (result is not", "_is_valid_key(self, key): if not self.key_pattern: return True return bool(re.match(self.key_pattern, key))", "self._is_valid_key(key): return result = self._get(self._qualified_key(key)) if self.auto_casting and (result is", "if self.auto_casting and (result is not None): result = jsonpickle.decode(result)", "def set(self, key, value): if not self._is_valid_key(key): return if self.auto_casting:", "self._is_valid_key(key): return if self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key), value) def", "self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self, key): if", "= jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self, key): if not self.key_pattern:", "= self._get(self._qualified_key(key)) if self.auto_casting and (result is not None): result", "jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self, key): if not self.key_pattern: return", "not self._is_valid_key(key): return if self.auto_casting: value = jsonpickle.encode(value) self._set(self._qualified_key(key), value)", "and (result is not None): result = jsonpickle.decode(result) return result", "self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def get(self, key):", "configured prefix prepended. 
\"\"\" pfx = self.key_prefix if self.key_prefix is", "= jsonpickle.decode(result) return result def set(self, key, value): if not", "bool(re.match(self.key_pattern, key)) def _qualified_key(self, key): \"\"\" Prepends the configured prefix", ":return: The key with any configured prefix prepended. \"\"\" pfx", "conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern') self.auto_casting =", "def __init__(self, conf): self.conf = deepcopy(self._default_conf) self.conf.update(conf) self.key_pattern = self.conf.get('pattern')", "= self.conf.get('pattern') self.auto_casting = self.conf.get('auto_casting') self.key_prefix = self.conf.get('prefix') def get(self,", "get(self, key): if not self._is_valid_key(key): return result = self._get(self._qualified_key(key)) if", "key): if not self.key_pattern: return True return bool(re.match(self.key_pattern, key)) def", "applicable). :param key: The unprefixed key. :return: The key with", "if not self.key_pattern: return True return bool(re.match(self.key_pattern, key)) def _qualified_key(self,", "to the key (if applicable). :param key: The unprefixed key.", "prepended. \"\"\" pfx = self.key_prefix if self.key_prefix is not None", "\"\"\" _default_conf = {} def __init__(self, conf): self.conf = deepcopy(self._default_conf)", "prefix prepended. \"\"\" pfx = self.key_prefix if self.key_prefix is not", "readers \"\"\" _default_conf = {} def __init__(self, conf): self.conf =", "self.auto_casting and (result is not None): result = jsonpickle.decode(result) return", "value = jsonpickle.encode(value) self._set(self._qualified_key(key), value) def _is_valid_key(self, key): if not", "\"\"\" pfx = self.key_prefix if self.key_prefix is not None else", "coding: utf-8 -*- import re from copy import deepcopy import", "def _is_valid_key(self, key): if not self.key_pattern: return True return bool(re.match(self.key_pattern,", "Prepends the configured prefix to the key (if applicable). 
:param", "= self.conf.get('prefix') def get(self, key): if not self._is_valid_key(key): return result", "def _qualified_key(self, key): \"\"\" Prepends the configured prefix to the", "if self.key_prefix is not None else '' return '{}{}'.format(pfx, key)", "not self._is_valid_key(key): return result = self._get(self._qualified_key(key)) if self.auto_casting and (result", "def get(self, key): if not self._is_valid_key(key): return result = self._get(self._qualified_key(key))", "key)) def _qualified_key(self, key): \"\"\" Prepends the configured prefix to" ]
[ "if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could", "Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute of Measurement", "ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,", "THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR", "(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF", "'__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit()", "must reproduce the above copyright notice, # this list of", "continue return True return False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher')", "# # 1. Redistributions of source code must retain the", "above copyright notice, # this list of conditions and the", "lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id = None def", "# # Redistribution and use in source and binary forms,", "static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q", "notice, # this list of conditions and the following disclaimer.", "NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE", "range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id =", "2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT,", "(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR", "INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT", "GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS;", "EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. #", "A PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL", "not initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector(", "THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH", "Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved. # # Redistribution", "= projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp =", "in range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id", "of its contributors # may be used to endorse or", "static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y", "of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights", "source and binary forms, with or without # modification, are", "products derived from this software without # specific prior written", "= q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w =", "Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved. # #", "PARTICULAR PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE", "ANY WAY OUT OF THE USE # OF THIS SOFTWARE,", "False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher:", "rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x", "the names of its contributors # may be used to", "geometry_msgs.msg import lanelet2 stb = None static_transform = None lat_origin", "def timer_callback(event): global stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def", "global stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global", "= lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb =", "or promote products derived from this software without # specific", "SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR", "initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon),", "derived from this software without # specific prior written permission.", "MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED.", "= geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id =", "OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR", "AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED", "ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, #", "actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 
0.0", "None map_frame_id = None actual_utm_with_no_offset_frame_id = None def timer_callback(event): global", "disclaimer. # 2. Redistributions in binary form must reproduce the", "and binary forms, with or without # modification, are permitted", "endorse or promote products derived from this software without #", "the copyright holder nor the names of its contributors #", "USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE", "DAMAGE. # import roslib import rospy import tf import tf2_ros", "= rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue return True return", "BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY,", "tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z", "Germany (www.fzi.de) # KIT, Institute of Measurement and Control, Karlsruhe,", "__name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not", "actual_utm_with_no_offset_frame_id for i in range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin", "origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp", "of source code must retain the above copyright notice, #", "OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON", "EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE", "OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY", "SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)", "None actual_utm_with_no_offset_frame_id = None def timer_callback(event): global stb, static_transform static_transform.header.stamp", "python # # Copyright (c) 2018 # FZI Forschungszentrum 
Informatik,", "written permission. # # THIS SOFTWARE IS PROVIDED BY THE", "WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE", "# this list of conditions and the following disclaimer. #", "WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE", "notice, # this list of conditions and the following disclaimer", "NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES;", "static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3] rospy.Timer(rospy.Duration(1.),", "roslib import rospy import tf import tf2_ros import geometry_msgs.msg import", "OUT OF THE USE # OF THIS SOFTWARE, EVEN IF", "q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y =", "stb = None static_transform = None lat_origin = None lon_origin", "OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY", "CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,", "THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS", "disclaimer in the documentation # and/or other materials provided with", "following conditions are met: # # 1. Redistributions of source", "(www.fzi.de) # KIT, Institute of Measurement and Control, Karlsruhe, Germany", "\"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING,", "TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR", "distribution. # 3. 
Neither the name of the copyright holder", "CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES,", "static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id,", "import tf2_ros import geometry_msgs.msg import lanelet2 stb = None static_transform", "= rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id", "# import roslib import rospy import tf import tf2_ros import", "nor the names of its contributors # may be used", "\"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue return True return False if", "name of the copyright holder nor the names of its", "None lat_origin = None lon_origin = None map_frame_id = None", "# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR", "OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF", "Could not initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector =", "static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin,", "static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0,", "DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS", "CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)", "Redistributions of source code must retain the above copyright notice,", "of conditions and the following disclaimer. # 2. 
Redistributions in", "OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT", "projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb", "lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id", "float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception:", "documentation # and/or other materials provided with the distribution. #", "(www.mrt.kit.edu) # All rights reserved. # # Redistribution and use", "IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE", "or without # modification, are permitted provided that the following", "OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT", "Redistribution and use in source and binary forms, with or", "source code must retain the above copyright notice, # this", "# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT", "= None lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id =", "rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector", "= rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x =", "== '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\")", "HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT,", "used to endorse or promote 
products derived from this software", "import geometry_msgs.msg import lanelet2 stb = None static_transform = None", "= q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3] rospy.Timer(rospy.Duration(1.), timer_callback)", "SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.", "= None lat_origin = None lon_origin = None map_frame_id =", "rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue return True return False", "rights reserved. # # Redistribution and use in source and", "# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY", "0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y", "DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING,", "q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3] rospy.Timer(rospy.Duration(1.), timer_callback) rospy.spin()", "rospy import tf import tf2_ros import geometry_msgs.msg import lanelet2 stb", "binary form must reproduce the above copyright notice, # this", "= None static_transform = None lat_origin = None lon_origin =", "try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\")", "PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" #", "-origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x", "exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False,", "FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL #", "3. 
Neither the name of the copyright holder nor the", "BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF", "in source and binary forms, with or without # modification,", "AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED", "permitted provided that the following conditions are met: # #", "LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING", "EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE #", "rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for", "in the documentation # and/or other materials provided with the", "origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False)", "of conditions and the following disclaimer in the documentation #", "ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER", "= None actual_utm_with_no_offset_frame_id = None def timer_callback(event): global stb, static_transform", "None def timer_callback(event): global stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform)", "OF SUCH DAMAGE. # import roslib import rospy import tf", "this list of conditions and the following disclaimer. # 2.", "form must reproduce the above copyright notice, # this list", "DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND", "stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin,", "# 1. 
Redistributions of source code must retain the above", "= float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id =", "use in source and binary forms, with or without #", "LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR", "CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,", "= map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y =", "map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01)", "except Exception: rospy.sleep(0.01) continue return True return False if __name__", "stb.sendTransform(static_transform) def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i", "WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN", "# FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute", "code must retain the above copyright notice, # this list", "Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All", "contributors # may be used to endorse or promote products", "OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED", "its contributors # may be used to endorse or promote", "IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
# import", "import rospy import tf import tf2_ros import geometry_msgs.msg import lanelet2", "COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT,", "tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id", "tf2_ros import geometry_msgs.msg import lanelet2 stb = None static_transform =", "geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id", "promote products derived from this software without # specific prior", "with or without # modification, are permitted provided that the", "OF THE POSSIBILITY OF SUCH DAMAGE. # import roslib import", "tf import tf2_ros import geometry_msgs.msg import lanelet2 stb = None", "Karlsruhe, Germany (www.fzi.de) # KIT, Institute of Measurement and Control,", "static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w", "# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY", "WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES", "# this list of conditions and the following disclaimer in", "i in range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\"))", "IN ANY WAY OUT OF THE USE # OF THIS", "actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue return True", "# Copyright (c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany", "the distribution. # 3. Neither the name of the copyright", "# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL", "prior written permission. 
# # THIS SOFTWARE IS PROVIDED BY", "be used to endorse or promote products derived from this", "map_frame_id = None actual_utm_with_no_offset_frame_id = None def timer_callback(event): global stb,", "FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT", "map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y", "the following disclaimer in the documentation # and/or other materials", "HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS OR", "and use in source and binary forms, with or without", "of the copyright holder nor the names of its contributors", "Exception: rospy.sleep(0.01) continue return True return False if __name__ ==", "IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE", "SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR", "THE POSSIBILITY OF SUCH DAMAGE. # import roslib import rospy", "PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE,", "conditions and the following disclaimer in the documentation # and/or", "# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS", "# 3. Neither the name of the copyright holder nor", "are met: # # 1. 
Redistributions of source code must", "reproduce the above copyright notice, # this list of conditions", "AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT", "in binary form must reproduce the above copyright notice, #", "= -origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0)", "forms, with or without # modification, are permitted provided that", "ARISING IN ANY WAY OUT OF THE USE # OF", "map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\"))", "binary forms, with or without # modification, are permitted provided", "# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN", "static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x =", "# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR", "provided that the following conditions are met: # # 1.", "specific prior written permission. # # THIS SOFTWARE IS PROVIDED", "# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE", "Germany (www.mrt.kit.edu) # All rights reserved. 
# # Redistribution and", "LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN", "# # Copyright (c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe,", "FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute of", "to endorse or promote products derived from this software without", "TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF", "the documentation # and/or other materials provided with the distribution.", "are permitted provided that the following conditions are met: #", "0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2]", "= None map_frame_id = None actual_utm_with_no_offset_frame_id = None def timer_callback(event):", "the name of the copyright holder nor the names of", "lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon)", "LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER", "met: # # 1. Redistributions of source code must retain", "# All rights reserved. 
# # Redistribution and use in", "not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin,", "holder nor the names of its contributors # may be", "None lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id = None", "Redistributions in binary form must reproduce the above copyright notice,", "= -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q =", "(c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de) #", "# Redistribution and use in source and binary forms, with", "if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit() origin_latlon =", "AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN", "rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit() origin_latlon", "the above copyright notice, # this list of conditions and", "conditions are met: # # 1. Redistributions of source code", "HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER", "projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now()", "lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster()", "list of conditions and the following disclaimer in the documentation", "SUCH DAMAGE. # import roslib import rospy import tf import", "KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) #", "# modification, are permitted provided that the following conditions are", "DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS", "stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id", "following disclaimer in the documentation # and/or other materials provided", "0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z =", "FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO", "lanelet2 stb = None static_transform = None lat_origin = None", "float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param(", "may be used to endorse or promote products derived from", "False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped()", "that the following conditions are met: # # 1. Redistributions", "IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED", "NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE", "OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE", "copyright notice, # this list of conditions and the following", "OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA,", "ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES", "True return False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not", "Copyright (c) 2018 # FZI Forschungszentrum Informatik, Karlsruhe, Germany (www.fzi.de)", "INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF", "-origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z = 0.0 q = tf.transformations.quaternion_from_euler(0,", "conditions and the following disclaimer. # 2. 
Redistributions in binary", "software without # specific prior written permission. # # THIS", "OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL,", "and the following disclaimer. # 2. Redistributions in binary form", "and the following disclaimer in the documentation # and/or other", "# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND", "without # specific prior written permission. # # THIS SOFTWARE", "ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. # import roslib", "the following disclaimer. # 2. Redistributions in binary form must", "= rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue", "q[0] static_transform.transform.rotation.y = q[1] static_transform.transform.rotation.z = q[2] static_transform.transform.rotation.w = q[3]", "= None def timer_callback(event): global stb, static_transform static_transform.header.stamp = rospy.Time.now()", "following disclaimer. # 2. Redistributions in binary form must reproduce", "POSSIBILITY OF SUCH DAMAGE. # import roslib import rospy import", "= tf2_ros.TransformBroadcaster() static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id =", "static_transform = geometry_msgs.msg.TransformStamped() static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id", "LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS", "import lanelet2 stb = None static_transform = None lat_origin =", "LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS", "retain the above copyright notice, # this list of conditions", "All rights reserved. 
# # Redistribution and use in source", "lanelet2.io.Origin(origin_latlon), False, False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform", "and/or other materials provided with the distribution. # 3. Neither", "without # modification, are permitted provided that the following conditions", "CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF", "= tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0] static_transform.transform.rotation.y = q[1]", "actual_utm_with_no_offset_frame_id = None def timer_callback(event): global stb, static_transform static_transform.header.stamp =", "rospy.sleep(0.01) continue return True return False if __name__ == '__main__':", "lat_origin = None lon_origin = None map_frame_id = None actual_utm_with_no_offset_frame_id", "the following conditions are met: # # 1. Redistributions of", "wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000):", "this list of conditions and the following disclaimer in the", "PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY", "modification, are permitted provided that the following conditions are met:", "IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR", "THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF", "# may be used to endorse or promote products derived", "import tf import tf2_ros import geometry_msgs.msg import lanelet2 stb =", "reserved. # # Redistribution and use in source and binary", "other materials provided with the distribution. # 3. Neither the", "for i in range(3000): try: lat_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lat_origin\")) lon_origin =", "# specific prior written permission. # # THIS SOFTWARE IS", "from this software without # specific prior written permission. 
#", "False, False) origin_xy = projector.forward(origin_latlon) stb = tf2_ros.TransformBroadcaster() static_transform =", "= float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except", "def wait_for_params_successful(): global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in", "# # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS", "rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\") except Exception: rospy.sleep(0.01) continue return", "static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z", "and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved. #", "lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000): try: lat_origin", "copyright holder nor the names of its contributors # may", "OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE #", "import roslib import rospy import tf import tf2_ros import geometry_msgs.msg", "wait_for_params_successful(): rospy.logerr(\"map_frame_to_utm_tf_publisher: Could not initialize\") exit() origin_latlon = lanelet2.core.GPSPoint(lat_origin, lon_origin)", "IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"", "this software without # specific prior written permission. # #", "list of conditions and the following disclaimer. # 2. Redistributions", "# and/or other materials provided with the distribution. # 3.", "materials provided with the distribution. # 3. 
Neither the name", "= actual_utm_with_no_offset_frame_id static_transform.transform.translation.x = -origin_xy.x static_transform.transform.translation.y = -origin_xy.y static_transform.transform.translation.z =", "OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE #", "permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT", "STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING", "static_transform = None lat_origin = None lon_origin = None map_frame_id", "global lat_origin, lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000): try:", "return False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if not wait_for_params_successful():", "static_transform.header.stamp = rospy.Time.now() static_transform.header.frame_id = map_frame_id static_transform.child_frame_id = actual_utm_with_no_offset_frame_id static_transform.transform.translation.x", "provided with the distribution. # 3. Neither the name of", "NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND", "lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy =", "# KIT, Institute of Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu)", "= lanelet2.core.GPSPoint(lat_origin, lon_origin) projector = lanelet2.projection.UtmProjector( lanelet2.io.Origin(origin_latlon), False, False) origin_xy", "USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED", "BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY", "INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY,", "IS\" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT", "INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT", "1. 
Redistributions of source code must retain the above copyright", "OTHERWISE) ARISING IN ANY WAY OUT OF THE USE #", "SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS", "THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A", "COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY EXPRESS", "#!/usr/bin/env python # # Copyright (c) 2018 # FZI Forschungszentrum", "with the distribution. # 3. Neither the name of the", "= 0.0 q = tf.transformations.quaternion_from_euler(0, 0, 0) static_transform.transform.rotation.x = q[0]", "None static_transform = None lat_origin = None lon_origin = None", "Measurement and Control, Karlsruhe, Germany (www.mrt.kit.edu) # All rights reserved.", "lon_origin = float(rospy.get_param(\"/lanelet2_interface_ros/lon_origin\")) map_frame_id = rospy.get_param(\"/lanelet2_interface_ros/map_frame_id\") actual_utm_with_no_offset_frame_id = rospy.get_param( \"/lanelet2_interface_ros/actual_utm_with_no_offset_frame_id\")", "names of its contributors # may be used to endorse", "TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT", "2. Redistributions in binary form must reproduce the above copyright", "ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,", "lon_origin, map_frame_id, actual_utm_with_no_offset_frame_id for i in range(3000): try: lat_origin =", "PURPOSE ARE # DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT", "THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY", "must retain the above copyright notice, # this list of", "Informatik, Karlsruhe, Germany (www.fzi.de) # KIT, Institute of Measurement and", "OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER #", "return True return False if __name__ == '__main__': rospy.init_node('map_frame_to_utm_tf_publisher') if", "SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED", "BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR #", "BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND", "THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" # AND ANY", "# 2. Redistributions in binary form must reproduce the above", "Neither the name of the copyright holder nor the names", "timer_callback(event): global stb, static_transform static_transform.header.stamp = rospy.Time.now() stb.sendTransform(static_transform) def wait_for_params_successful():", "EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO," ]
[]
[ "+= len(subclauses) return total def unique_literals(*clauses): literals = set() for", "set() for clause in chain(*clauses): literals.update((abs(l) for l in clause))", "count_clauses(*clauses): total = 0 for subclauses in clauses: total +=", "def unique_literals(*clauses): literals = set() for clause in chain(*clauses): literals.update((abs(l)", "subclauses in clauses: total += len(subclauses) return total def unique_literals(*clauses):", "chain(*clauses))) def count_clauses(*clauses): total = 0 for subclauses in clauses:", "return total def unique_literals(*clauses): literals = set() for clause in", "from itertools import chain def make_comparable(*clauses): return set((frozenset(c) for c", "in chain(*clauses))) def count_clauses(*clauses): total = 0 for subclauses in", "unique_literals(*clauses): literals = set() for clause in chain(*clauses): literals.update((abs(l) for", "len(subclauses) return total def unique_literals(*clauses): literals = set() for clause", "chain def make_comparable(*clauses): return set((frozenset(c) for c in chain(*clauses))) def", "for clause in chain(*clauses): literals.update((abs(l) for l in clause)) return", "= set() for clause in chain(*clauses): literals.update((abs(l) for l in", "set((frozenset(c) for c in chain(*clauses))) def count_clauses(*clauses): total = 0", "def count_clauses(*clauses): total = 0 for subclauses in clauses: total", "import chain def make_comparable(*clauses): return set((frozenset(c) for c in chain(*clauses)))", "= 0 for subclauses in clauses: total += len(subclauses) return", "total = 0 for subclauses in clauses: total += len(subclauses)", "in clauses: total += len(subclauses) return total def unique_literals(*clauses): literals", "total def unique_literals(*clauses): literals = set() for clause in chain(*clauses):", "0 for subclauses in clauses: total += len(subclauses) return total", "literals = set() for clause in chain(*clauses): literals.update((abs(l) for l", "itertools import chain def 
make_comparable(*clauses): return set((frozenset(c) for c in", "<gh_stars>0 from itertools import chain def make_comparable(*clauses): return set((frozenset(c) for", "def make_comparable(*clauses): return set((frozenset(c) for c in chain(*clauses))) def count_clauses(*clauses):", "total += len(subclauses) return total def unique_literals(*clauses): literals = set()", "clause in chain(*clauses): literals.update((abs(l) for l in clause)) return literals", "make_comparable(*clauses): return set((frozenset(c) for c in chain(*clauses))) def count_clauses(*clauses): total", "for c in chain(*clauses))) def count_clauses(*clauses): total = 0 for", "return set((frozenset(c) for c in chain(*clauses))) def count_clauses(*clauses): total =", "for subclauses in clauses: total += len(subclauses) return total def", "c in chain(*clauses))) def count_clauses(*clauses): total = 0 for subclauses", "clauses: total += len(subclauses) return total def unique_literals(*clauses): literals =" ]
[ "KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self):", ": false, \"plane_output\" : [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" :", "= \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = False self._CustomizeSimulationSettings() def", "self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{", ": \"JsonOutputProcess\", \"Parameters\" : { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" :", "True self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\"", "with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation settings", "applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as", "if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if self.use_oss ==", "_AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\",", "False self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\"", "== '__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() 
test.testSodShockTubeExplicitASGSShockCapturing() #", "\"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if self.use_oss == False else \"_OSS\",", "KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def", "simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{", "SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing", "= 1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" # Read", "self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" :", "= True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss =", "= True self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self): self.print_output =", "\"This process writes postprocessing files for GiD\", \"Parameters\" : {", "\"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\" : { \"gidpost_flags\" : {", "\"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\",", "runTest(self): # If required, add the output process to the", 
"\"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" }, \"file_label\" : \"step\", \"output_control_type\" :", "[\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" :", "def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize", "[\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : [] } }", "parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required, add", "self.use_oss == False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\")", "from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type =", "# Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model()", "FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def", "self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required, add the", "the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation =", "\"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : 
\"GiDOutputProcess\", \"help\" : \"This", "\"skin_output\" : false, \"plane_output\" : [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\"", ": \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" }, \"file_label\"", "\"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if self.use_oss", "if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings =", "\"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ == '__main__': test", "\"_SC\" if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if", "\"gidpost_flags\" : { \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\"", "Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation", "self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : 
\"gid_output_process\", \"kratos_module\"", "def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing =", "# Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings", ": \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\" :", "output process to the test settings if self.print_reference_values: self._AddReferenceValuesOutput() else:", "if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the test simulation", "== False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name)", "output process to the test settings if self.print_output: self._AddOutput() #", "KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type", "\"result_file_configuration\" : { \"gidpost_flags\" : { \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\"", "self._AddOutput() # If required, add the reference values output process", "= False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder =", "KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\"", "\"Parameters\" : { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], 
\"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\"", ": \"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\", \"help\" : \"This process writes", ": \"SingleFile\" }, \"file_label\" : \"step\", \"output_control_type\" : \"step\", \"output_frequency\"", "def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\" :", "if self.print_output: self._AddOutput() # If required, add the reference values", "KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class", "\"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\" : {", ": { \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" :", "\"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 } }\"\"\") output_file_name", "self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"gid_output_process\",", "if self.use_oss == False else \"_OSS\", \"_SC\" if self.shock_capturing else", "the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters", "\"postprocess_parameters\" : { \"result_file_configuration\" : { \"gidpost_flags\" : { \"GiDPostMode\"", "class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False", "\"from_json_check_result_process\", \"kratos_module\" : 
\"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" : {", "\"_SC\" if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings", "writes postprocessing files for GiD\", \"Parameters\" : { \"model_part_name\" :", "== \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if self.use_oss == False else", "true, \"node_output\" : false, \"skin_output\" : false, \"plane_output\" : [],", "import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities", "self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing", "FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss =", "\"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\" : { \"gidpost_flags\"", "test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() # test.testSodShockTubeExplicitOSSShockCapturing() test.runTest() test.tearDown()", "\"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" },", "self.model = KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self):", "KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from", "{ \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], 
\"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\",", "\"_implicit\", \"_ASGS\" if self.use_oss == False else \"_OSS\", \"_SC\" if", "the test settings if self.print_output: self._AddOutput() # If required, add", ": \"GiDOutputProcess\", \"help\" : \"This process writes postprocessing files for", "If required, add the output process to the test settings", "KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type)", ": 0.0, \"relative_tolerance\" : 0.0, \"time_frequency\" : 0.025 } }\"\"\")", "\"CompressibleExplicit\" self.use_oss = True self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self):", "\"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 } }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\"", "settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the test", "and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities", "as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis", "False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True", "testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = False", "\"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if", "== 
False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name)", "self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ == '__main__': test = SodShockTubeTest() test.setUp() #", "\"output_frequency\" : 1.0, \"body_output\" : true, \"node_output\" : false, \"skin_output\"", "GiD\", \"Parameters\" : { \"model_part_name\" : \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\",", "__file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss)", "output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\",", "if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__", "False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings)", "\"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" # Read the simulation settings with", "\"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\"", "= KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : 
\"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" :", "self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ ==", "test settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the", "\"time_frequency\" : 0.025 } }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if", "open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If", "with open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): #", "\"_OSS\", \"_SC\" if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self):", "# Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as", "= True self._CustomizeSimulationSettings() def setUp(self): self.print_output = False self.print_reference_values =", ": [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : [] } } }\"\"\") output_name", "input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\",", "def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"gid_output_process\", 
\"kratos_module\" :", "\"CompressibleExplicit\" self.use_oss = False self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self):", "KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder,", "\"GiDOutputProcess\", \"help\" : \"This process writes postprocessing files for GiD\",", "If required, add the reference values output process to the", "0.025 } }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type ==", ": { \"result_file_configuration\" : { \"gidpost_flags\" : { \"GiDPostMode\" :", "else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def", "\"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\"", "self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = False self._CustomizeSimulationSettings()", "simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters =", "self.print_output: self._AddOutput() # If required, add the reference values output", "} } }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type ==", "\"Parameters\" : { \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\"", "testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = True", "process writes postprocessing files for GiD\", \"Parameters\" : { 
\"model_part_name\"", ": [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\"", "0.0, \"time_frequency\" : 0.025 } }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\"", "self._CustomizeSimulationSettings() def setUp(self): self.print_output = False self.print_reference_values = False self.check_absolute_tolerance", "simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters)", "= SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() # test.testSodShockTubeExplicitOSSShockCapturing()", "= \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = True self._CustomizeSimulationSettings() def", ": \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" }, \"file_label\" : \"step\", \"output_control_type\"", "1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\"", "core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest import", "\"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\" : { \"output_variables\"", "\"step\", \"output_control_type\" : \"step\", \"output_frequency\" : 1.0, \"body_output\" : true,", "json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ 
== '__main__': test = SodShockTubeTest()", "if __name__ == '__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS()", "= FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time')", "\"python_module\" : \"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\"", "simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__):", "= 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename =", "KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required, add the output process", "\"_SC\" if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings", "\"CompressibleExplicit\" self.use_oss = False self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self):", "\"time_frequency\" : 0.025 } }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if", "= KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" :", "}\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else", "\"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : 
[\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] },", "\"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : []", "= \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = False self._CustomizeSimulationSettings() def", "process to the test settings if self.print_output: self._AddOutput() # If", "== False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name)", "= False self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type =", "\"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\" : { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"],", "the output process to the test settings if self.print_output: self._AddOutput()", ": \"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\" :", "= True self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type =", "0.0, \"relative_tolerance\" : 0.0, \"time_frequency\" : 0.025 } }\"\"\") input_file_name", "self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss", "\"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\" : {", "\"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\" : 0.0, \"time_frequency\"", "gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) 
self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\",", "self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__):", "simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): #", "\"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\", \"help\" : \"This process writes postprocessing", "else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\"", "False self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10", "\"plane_output\" : [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\"", "as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self):", "def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\", \"kratos_module\" :", "self.parameters) simulation.Run() def tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self):", "settings 
self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\"", "False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False", ": { \"gidpost_flags\" : { \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" :", ": [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : [] }", "output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\",", "True self._CustomizeSimulationSettings() def setUp(self): self.print_output = False self.print_reference_values = False", "{ \"model_part_name\" : \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" : {", "True self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self): self.print_output = False", "= KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run() def tearDown(self): with", "= \"ProjectParameters.json\" # Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with", ": \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\" :", ": \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" :", "} }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if 
self.solver_type == \"CompressibleExplicit\"", ": [] } } }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if", "False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings)", "self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if self.use_oss == False", "self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\"", ": 0.025 } }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type", "\"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\"", "= False self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type =", "json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\",", "[], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"]", "kratos core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as KratosUnittest", "required, add the reference values output process to the test", "\"model_part_name\" : 
\"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 } }\"\"\") output_file_name =", "self.use_oss = False self.shock_capturing = True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type", ": [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"], \"nodal_nonhistorical_results\" :", "self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\", \"kratos_module\"", "0.025 } }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type ==", "\"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\"", ": \"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\" : { \"gidpost_flags\" :", "def _CustomizeSimulationSettings(self): # Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def", "json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\"", "__name__ == '__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing()", "\"relative_tolerance\" : 0.0, \"time_frequency\" : 0.025 } }\"\"\") input_file_name =", "test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation = 
FluidDynamicsAnalysis(self.model,", "if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings =", "= \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = True self._CustomizeSimulationSettings() def", "KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing)", "= False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss =", "self.use_oss = False self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type", "to the test settings if self.print_output: self._AddOutput() # If required,", ": \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 } }\"\"\")", ": \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\" : 0.0, \"time_frequency\" :", ": \"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\", \"help\" :", "values output process to the test settings if self.print_reference_values: self._AddReferenceValuesOutput()", ": \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 } }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format(", "self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing", 
"\"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" }, \"file_label\" : \"step\",", "json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ == '__main__': test = SodShockTubeTest() test.setUp()", "\"help\" : \"This process writes postprocessing files for GiD\", \"Parameters\"", ": false, \"skin_output\" : false, \"plane_output\" : [], \"nodal_results\" :", "= \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\"", "} }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\"", "required, add the output process to the test settings if", "Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self): gid_output_settings =", "\"node_output\" : false, \"skin_output\" : false, \"plane_output\" : [], \"nodal_results\"", "import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss", ": \"This process writes postprocessing files for GiD\", \"Parameters\" :", ": \"step\", \"output_control_type\" : \"step\", \"output_frequency\" : 1.0, \"body_output\" :", "\"CompressibleExplicit\" self.use_oss = True self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self):", "\"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\", \"help\" : \"This process", "self.use_oss = True self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self): 
self.print_output", "gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\"", "# Import kratos core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest", "\"MultiFileFlag\" : \"SingleFile\" }, \"file_label\" : \"step\", \"output_control_type\" : \"step\",", "\"file_label\" : \"step\", \"output_control_type\" : \"step\", \"output_frequency\" : 1.0, \"body_output\"", "\"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" :", "\"_ASGS\" if self.use_oss == False else \"_OSS\", \"_SC\" if self.shock_capturing", "\"python_module\" : \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\"", "settings_filename = \"ProjectParameters.json\" # Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__):", "\"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"],", ": \"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\" :", "{ \"check_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\",", "else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) 
self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ == '__main__':", "}\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else", "\"point_data_configuration\" : [] } } }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\"", "_AddOutput(self): gid_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\",", "add the output process to the test settings if self.print_output:", ": { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" :", "False self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\"", "else: self._AddReferenceValuesCheck() # Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model", "= False self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance =", "\"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self):", "KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\",", "else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance)", "settings with 
KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read())", "\"process_name\" : \"JsonOutputProcess\", \"Parameters\" : { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\"", "def setUp(self): self.print_output = False self.print_reference_values = False self.check_absolute_tolerance =", "\"body_output\" : true, \"node_output\" : false, \"skin_output\" : false, \"plane_output\"", "} }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\"", "[] } } }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format( \"_explicit\" if self.solver_type", "}\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else", "= False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss =", "def runTest(self): # If required, add the output process to", "\"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\" }, \"file_label\" :", "= \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" # Read the simulation settings", "the reference values output process to the test settings if", "1.0, \"body_output\" : true, \"node_output\" : false, \"skin_output\" : false,", "import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase):", "testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = False", "1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" # Read the", 
"self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss", "else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def", "False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder = \"sod_shock_tube_test\"", "[\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : [] } } }\"\"\") output_name =", "to the test settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() #", "def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing =", "self.work_folder = \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" # Read the simulation", "self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename", "self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = True self._CustomizeSimulationSettings()", "test settings if self.print_output: self._AddOutput() # If required, add the", "def testSodShockTubeExplicitASGS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing =", "KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import FluidDynamicsAnalysis class SodShockTubeTest(KratosUnittest.TestCase): def", "setUp(self): self.print_output = False self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8", "for GiD\", \"Parameters\" : { \"model_part_name\" : \"FluidModelPart\", \"output_name\" :", 
"\"nodal_nonhistorical_results\" : [\"ARTIFICIAL_BULK_VISCOSITY\",\"ARTIFICIAL_CONDUCTIVITY\",\"ARTIFICIAL_DYNAMIC_VISCOSITY\"] }, \"point_data_configuration\" : [] } } }\"\"\")", "def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing =", "with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def", "\"JsonOutputProcess\", \"Parameters\" : { \"output_variables\" : [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" : \"TO_BE_DEFINED\",", ": [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" :", "\"process_name\" : \"GiDOutputProcess\", \"help\" : \"This process writes postprocessing files", "\"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings)", "import KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis", "tearDown(self): with KratosUnittest.WorkFolderScope(self.work_folder, __file__): KratosUtilities.DeleteFileIfExisting('sod_shock_tube_geom_coarse.time') def _CustomizeSimulationSettings(self): # Customize simulation", ": \"step\", \"output_frequency\" : 1.0, \"body_output\" : true, \"node_output\" :", ": \"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\" : 
[\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" :", "self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance = 1.0e-10 self.work_folder", "\"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" : \"SingleFile\"", "False else \"_OSS\", \"_SC\" if self.shock_capturing else \"\") json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance)", "postprocessing files for GiD\", \"Parameters\" : { \"model_part_name\" : \"FluidModelPart\",", "json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\"", "with KratosUnittest.WorkFolderScope(self.work_folder,__file__): self.model = KratosMultiphysics.Model() simulation = FluidDynamicsAnalysis(self.model, self.parameters) simulation.Run()", "\"step\", \"output_frequency\" : 1.0, \"body_output\" : true, \"node_output\" : false,", "\"\") json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" :", ": true, \"node_output\" : false, \"skin_output\" : false, \"plane_output\" :", "\"SingleFile\" }, \"file_label\" : \"step\", \"output_control_type\" : \"step\", \"output_frequency\" :", "# If required, add the output process to the test", "{ \"result_file_configuration\" : { \"gidpost_flags\" : { \"GiDPostMode\" : \"GiD_PostBinary\",", "{ \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\",", "self.shock_capturing else \"\") 
json_output_settings[\"Parameters\"][\"output_file_name\"].SetString(output_file_name) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_output_settings) def _AddReferenceValuesCheck(self): json_check_settings = KratosMultiphysics.Parameters(\"\"\"{", "self.shock_capturing = True self._CustomizeSimulationSettings() def setUp(self): self.print_output = False self.print_reference_values", "process to the test settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck()", "[\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0,", "\"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\"", "else \"\") gid_output_settings[\"Parameters\"][\"output_name\"].SetString(output_name) self.parameters[\"output_processes\"][\"gid_output\"].Append(gid_output_settings) def _AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\"", "\"ProjectParameters.json\" # Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r')", "\"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025 }", "= \"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\"", "\"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\" : 0.0, \"time_frequency\" : 0.025", "KratosMultiphysics.KratosUnittest as KratosUnittest import KratosMultiphysics.kratos_utilities as KratosUtilities from KratosMultiphysics.FluidDynamicsApplication.fluid_dynamics_analysis import", "KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : 
\"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\",", "else \"_implicit\", \"_ASGS\" if self.use_oss == False else \"_OSS\", \"_SC\"", ": \"KratosMultiphysics\", \"process_name\" : \"JsonOutputProcess\", \"Parameters\" : { \"output_variables\" :", "{ \"gidpost_flags\" : { \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\",", "True self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True", "self.use_oss = True self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type", "self.print_output = False self.print_reference_values = False self.check_absolute_tolerance = 1.0e-8 self.check_relative_tolerance", "as parameter_file: self.parameters = KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required,", "false, \"skin_output\" : false, \"plane_output\" : [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"],", "}, \"point_data_configuration\" : [] } } }\"\"\") output_name = \"sod_shock_tube{0}{1}{2}\".format(", "\"Parameters\" : { \"model_part_name\" : \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\"", "self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create the test simulation with", "reference values output process to the test settings if self.print_reference_values:", ": { \"model_part_name\" : \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" :", "false, \"plane_output\" : [], \"nodal_results\" : [\"DENSITY\",\"MOMENTUM\",\"TOTAL_ENERGY\"], \"gauss_point_results\" : [\"SHOCK_SENSOR\",\"THERMAL_SENSOR\",\"SHEAR_SENSOR\"],", "Import kratos core and applications import KratosMultiphysics import KratosMultiphysics.KratosUnittest as", "test = SodShockTubeTest() 
test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() #", "[\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"output_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"time_frequency\" : 0.025", "self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = True self._CustomizeSimulationSettings()", "'__main__': test = SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS()", "\"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\", \"Parameters\" : { \"check_variables\"", "\"python_module\" : \"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"GiDOutputProcess\", \"help\"", "the test settings if self.print_reference_values: self._AddReferenceValuesOutput() else: self._AddReferenceValuesCheck() # Create", "KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"from_json_check_result_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" : \"FromJsonCheckResultProcess\",", "json_check_settings[\"Parameters\"][\"input_file_name\"].SetString(input_file_name) json_check_settings[\"Parameters\"][\"tolerance\"].SetDouble(self.check_absolute_tolerance) json_check_settings[\"Parameters\"][\"relative_tolerance\"].SetDouble(self.check_relative_tolerance) self.parameters[\"processes\"][\"json_check_process_list\"].Append(json_check_settings) if __name__ == '__main__': test =", "= KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"gid_output_process\", \"kratos_module\" : \"KratosMultiphysics\", \"process_name\" :", "add the reference values output process to the test settings", "self._AddReferenceValuesCheck() # Create the test simulation with KratosUnittest.WorkFolderScope(self.work_folder,__file__): 
self.model =", "SodShockTubeTest() test.setUp() # test.testSodShockTubeExplicitASGS() test.testSodShockTubeExplicitASGSShockCapturing() # test.testSodShockTubeExplicitOSS() # test.testSodShockTubeExplicitOSSShockCapturing() test.runTest()", "testSodShockTubeExplicitOSSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing = True", "settings if self.print_output: self._AddOutput() # If required, add the reference", "self.shock_capturing = False self._CustomizeSimulationSettings() def testSodShockTubeExplicitASGSShockCapturing(self): self.solver_type = \"CompressibleExplicit\" self.use_oss", "\"sod_shock_tube{0}{1}{2}_results.json\".format( \"_explicit\" if self.solver_type == \"CompressibleExplicit\" else \"_implicit\", \"_ASGS\" if", "\"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" : 0.0, \"relative_tolerance\" : 0.0,", "Read the simulation settings with KratosUnittest.WorkFolderScope(self.work_folder,__file__): with open(settings_filename,'r') as parameter_file:", ": { \"GiDPostMode\" : \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" :", "self.check_relative_tolerance = 1.0e-10 self.work_folder = \"sod_shock_tube_test\" settings_filename = \"ProjectParameters.json\" #", "# If required, add the reference values output process to", ": 1.0, \"body_output\" : true, \"node_output\" : false, \"skin_output\" :", "_AddReferenceValuesOutput(self): json_output_settings = KratosMultiphysics.Parameters(\"\"\"{ \"python_module\" : \"json_output_process\", \"kratos_module\" : \"KratosMultiphysics\",", ": \"GiD_PostBinary\", \"WriteDeformedMeshFlag\" : \"WriteDeformed\", \"WriteConditionsFlag\" : \"WriteConditions\", \"MultiFileFlag\" :", "\"output_control_type\" : \"step\", \"output_frequency\" : 1.0, \"body_output\" : true, \"node_output\"", ": 0.025 } }\"\"\") output_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format( 
\"_explicit\" if self.solver_type", "}, \"file_label\" : \"step\", \"output_control_type\" : \"step\", \"output_frequency\" : 1.0,", "self.solver_type = \"CompressibleExplicit\" self.use_oss = False self.shock_capturing = False self._CustomizeSimulationSettings()", "\"tolerance\" : 0.0, \"relative_tolerance\" : 0.0, \"time_frequency\" : 0.025 }", ": 0.0, \"time_frequency\" : 0.025 } }\"\"\") input_file_name = \"sod_shock_tube{0}{1}{2}_results.json\".format(", "_CustomizeSimulationSettings(self): # Customize simulation settings self.parameters[\"solver_settings\"][\"solver_type\"].SetString(self.solver_type) self.parameters[\"solver_settings\"][\"use_oss\"].SetBool(self.use_oss) self.parameters[\"solver_settings\"][\"shock_capturing\"].SetBool(self.shock_capturing) def _AddOutput(self):", ": [\"DENSITY\",\"MOMENTUM_X\",\"MOMENTUM_Y\",\"TOTAL_ENERGY\"], \"input_file_name\" : \"TO_BE_DEFINED\", \"model_part_name\" : \"FluidModelPart.FluidParts_Fluid\", \"tolerance\" :", "\"model_part_name\" : \"FluidModelPart\", \"output_name\" : \"TO_BE_DEFINED\", \"postprocess_parameters\" : { \"result_file_configuration\"", "= KratosMultiphysics.Parameters(parameter_file.read()) def runTest(self): # If required, add the output", "self._CustomizeSimulationSettings() def testSodShockTubeExplicitOSS(self): self.solver_type = \"CompressibleExplicit\" self.use_oss = True self.shock_capturing", "files for GiD\", \"Parameters\" : { \"model_part_name\" : \"FluidModelPart\", \"output_name\"" ]
[ "import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC", "import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC", "import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC", "TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\":", "from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB", ".cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import", "from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller", ".tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {\"basic_mac\":", ".cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import", "from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY =", "BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\": CateBCommNIBMAC, \"tar_comm_mac\": TarCommMAC, \"cate_pruned_broadcast_comm_mac\":", "CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY", ".basic_controller import BasicMAC from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import", 
".cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from .cate_pruned_broadcast_comm_controller import", "BasicMAC from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from", "REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\": CateBCommNIBMAC,", "from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller", "import TarCommMAC from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC,", "from .basic_controller import BasicMAC from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full", "from .cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC,", "= {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\": CateBCommNIBMAC, \"tar_comm_mac\":", "import CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC,", "CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\":", "CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from", "\"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\": CateBCommNIBMAC, \"tar_comm_mac\": TarCommMAC, \"cate_pruned_broadcast_comm_mac\": CatePBCommMAC}", 
".cate_pruned_broadcast_comm_controller import CatePBCommMAC REGISTRY = {\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\":", "import BasicMAC from .cate_broadcast_comm_controller import CateBCommMAC from .cate_broadcast_comm_controller_full import CateBCommFMAC", "CateBCommFMAC from .cate_broadcast_comm_controller_not_IB import CateBCommNIBMAC from .tar_comm_controller import TarCommMAC from", "{\"basic_mac\": BasicMAC, \"cate_broadcast_comm_mac\": CateBCommMAC, \"cate_broadcast_comm_mac_full\": CateBCommFMAC, \"cate_broadcast_comm_mac_not_IB\": CateBCommNIBMAC, \"tar_comm_mac\": TarCommMAC," ]
[ "logging import websockets logging.basicConfig() async def counter(websocket, path): try: print(\"connect\")", "async def main(): async with websockets.serve(counter, \"localhost\", 5000): await asyncio.Future()", "import asyncio import json import logging import websockets logging.basicConfig() async", "async for message in websocket: print(message) finally: USERS.remove(websocket) async def", "def counter(websocket, path): try: print(\"connect\") async for message in websocket:", "print(message) finally: USERS.remove(websocket) async def main(): async with websockets.serve(counter, \"localhost\",", "asyncio import json import logging import websockets logging.basicConfig() async def", "await asyncio.Future() # run forever if __name__ == \"__main__\": asyncio.run(main())", "\"localhost\", 5000): await asyncio.Future() # run forever if __name__ ==", "5000): await asyncio.Future() # run forever if __name__ == \"__main__\":", "print(\"connect\") async for message in websocket: print(message) finally: USERS.remove(websocket) async", "USERS.remove(websocket) async def main(): async with websockets.serve(counter, \"localhost\", 5000): await", "in websocket: print(message) finally: USERS.remove(websocket) async def main(): async with", "websocket: print(message) finally: USERS.remove(websocket) async def main(): async with websockets.serve(counter,", "finally: USERS.remove(websocket) async def main(): async with websockets.serve(counter, \"localhost\", 5000):", "main(): async with websockets.serve(counter, \"localhost\", 5000): await asyncio.Future() # run", "logging.basicConfig() async def counter(websocket, path): try: print(\"connect\") async for message", "counter(websocket, path): try: print(\"connect\") async for message in websocket: print(message)", "for message in websocket: print(message) finally: USERS.remove(websocket) async def main():", "message in websocket: print(message) finally: USERS.remove(websocket) async def main(): async", "def main(): async with 
websockets.serve(counter, \"localhost\", 5000): await asyncio.Future() #", "async def counter(websocket, path): try: print(\"connect\") async for message in", "json import logging import websockets logging.basicConfig() async def counter(websocket, path):", "async with websockets.serve(counter, \"localhost\", 5000): await asyncio.Future() # run forever", "import logging import websockets logging.basicConfig() async def counter(websocket, path): try:", "with websockets.serve(counter, \"localhost\", 5000): await asyncio.Future() # run forever if", "websockets.serve(counter, \"localhost\", 5000): await asyncio.Future() # run forever if __name__", "path): try: print(\"connect\") async for message in websocket: print(message) finally:", "import websockets logging.basicConfig() async def counter(websocket, path): try: print(\"connect\") async", "try: print(\"connect\") async for message in websocket: print(message) finally: USERS.remove(websocket)", "websockets logging.basicConfig() async def counter(websocket, path): try: print(\"connect\") async for", "import json import logging import websockets logging.basicConfig() async def counter(websocket," ]
[ "outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model = Model(inputs=input_img, outputs=outputs) return", "load import numpy as np #import cv2 import nibabel as", "= add([input_mat,X]); return X def Vnet_3d(input_img, n_filters = 8, dropout", "= Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X)", "imread, imshow, concatenate_images from skimage.transform import resize from sklearn.utils import", "import pandas as pd import numpy as np import matplotlib.pyplot", "import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from", "concatenate_images from skimage.transform import resize from sklearn.utils import class_weight from", "True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5) ,", "as K from keras.utils import to_categorical from keras import metrics", "(2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2),", "= concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 =", "os from skimage.io import imread, imshow, concatenate_images from skimage.transform import", "import load import numpy as np #import cv2 import nibabel", "import numpy as np #import cv2 import nibabel as nib", "import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping", "padding = 'same')(c3) p3 = Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True)", "GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping,", "from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D", "= Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6 = 
Conv3D(n_filters*16,kernel_size =", "(2,2,2) , strides = (2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True)", "= conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) ,", "c3 = conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2)", "import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X", "n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2)", "#p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) ,", "conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X", "c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding=", "conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides", "= add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides =", "= 'same')(c3) p3 = Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4", "padding= 'same')(c8); u9 = concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 =", "p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2),", "ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator,", "= conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides", "import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda,", "as pd import numpy as np import matplotlib.pyplot as plt", "array_to_img, img_to_array, load_img from skimage.io import imread, imshow, concatenate_images from", "(2,2,2) , padding= 'same')(c9); 
u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size", "(2,2,2) , strides = (2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6)", "strides = (2,2,2) , padding = 'same' )(c1) c3 =", "X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X =", "'same')(c7); u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8)", "= conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7);", "= Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7); u6 = concatenate([u6,c5]);", "Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector,", "import imread, imshow, concatenate_images from skimage.transform import resize # from", "from skimage.transform import resize from sklearn.utils import class_weight from keras.callbacks", "import keras.backend as K from keras.utils import to_categorical from keras", "RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import", "Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c4) p4", "from keras import metrics from keras.models import Model, load_model from", "0.2, batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size", "Activation('relu')(X) X = add([input_mat,X]); return X def Vnet_3d(input_img, n_filters =", "BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape", "import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge", "from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = 
Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if", "skimage.transform import resize # from medpy.io import load import numpy", "p3 = Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size", "= (2,2,2) , padding = 'same' )(c1) c3 = conv_block(c2", "= Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c6)", "u9 = concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10", "conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) ,", "K from keras.utils import to_categorical from keras import metrics from", "c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding =", "<filename>3d_Vnet/3dvnet.py import random import pandas as pd import numpy as", "keras import metrics from keras.models import Model, load_model from keras.layers", "#c6 = conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) ,", "= Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2), padding =", "'same')(c8); u9 = concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9)", "2), padding='same')(p7); u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 =", "return X def Vnet_3d(input_img, n_filters = 8, dropout = 0.2,", "= (2,2,2) , padding= 'same')(c9); u10 = concatenate([u10,c1]); c10 =", "Activation, Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from", "imread, imshow, concatenate_images from skimage.transform import resize # from medpy.io", "Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate,", "= add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10)", "import numpy as 
np import matplotlib.pyplot as plt #%matplotlib inline", "concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides", "X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X =", "= (2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4,", "MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks import", "'same')(c9); u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides =", "(2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True)", "= concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 =", "import CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU", "Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2)", "= Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs =", "conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model = Model(inputs=input_img, outputs=outputs)", "u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7", "keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU import os from", "X def Vnet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm", "keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core import", "skimage.io import imread, imshow, concatenate_images from skimage.transform import resize #", "= Activation('relu')(X) X = add([input_mat,X]); return X def Vnet_3d(input_img, n_filters", "(2,2,2) , padding= 'same')(c8); u9 = concatenate([u9,c3]); c9 = 
conv_block(u9,n_filters*4,5,True)", ", padding= 'same')(c8); u9 = concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9", "keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from", "= Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X)", "import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread, imshow,", "#import cv2 import nibabel as nib from PIL import Image", "= True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5)", "u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9", "medpy.io import load import numpy as np #import cv2 import", "if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X =", "= Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c4)", ", strides = (2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6) #c6", "import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from", "= Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7);", "padding='same')(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6 =", "dropout = 0.2, batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1", "padding='same')(input_img) #c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) ,", "import PReLU import os from skimage.io import imread, imshow, concatenate_images", "concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides", ", strides = (2,2,2), padding = 'same')(c3) p3 = Dropout(dropout)(p3)", "import Lambda, RepeatVector, Reshape from keras.layers.convolutional 
import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from", "X = add([input_mat,X]); return X def Vnet_3d(input_img, n_filters = 8,", "= Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9); u10 = concatenate([u10,c1]);", "img_to_array, load_img from skimage.io import imread, imshow, concatenate_images from skimage.transform", "Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X", "Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X", "= Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding = 'same')(u10);", "ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread, imshow, concatenate_images", "u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7); u6 =", ", padding= 'same')(c7); u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8", "PReLU import os from skimage.io import imread, imshow, concatenate_images from", "= 8, dropout = 0.2, batch_norm = True): #c1 =", "= conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2)", "imshow, concatenate_images from skimage.transform import resize from sklearn.utils import class_weight", "from keras.utils import to_categorical from keras import metrics from keras.models", "padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2,", ", strides = (2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4) c5", "= (2,2,2) , padding= 'same')(c8); u9 = concatenate([u9,c3]); c9 =", "n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2),", "= BatchNormalization()(X) X = Activation('relu')(X) X = 
Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm:", "class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from", "8, dropout = 0.2, batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm)", "(2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True)", "def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X = BatchNormalization()(X)", "= 'same' )(c1) c3 = conv_block(c2 , n_filters*2,5,True) p3 =", "= Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9);", "ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img,", "EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image import", "conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7); u6", "keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau", "keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose", "ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping from", "imshow, concatenate_images from skimage.transform import resize # from medpy.io import", "Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9); u10", "CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU import", ", strides = (2,2,2) , padding = 'same' )(c1) c3", "Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 
'same')(c8); u9 = concatenate([u9,c3]); c9", "= concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) ,", "p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) ,", "Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2)", "from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks", "= (1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size", "tensorflow as tf import keras.backend as K from keras.utils import", "from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam", "Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7); u8", "as tf import keras.backend as K from keras.utils import to_categorical", "u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9); u10 =", "Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding = 'same')(u10); c10", "add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import", ", strides = (1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img]) c2", "padding = 'same' )(c1) c3 = conv_block(c2 , n_filters*2,5,True) p3", "= (2,2,2) , strides = (2,2,2) , padding='same')(c4) p4 =", "concatenate_images from skimage.transform import resize # from medpy.io import load", "conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1)", "add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2)", "n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2)", "metrics from keras.models import Model, load_model from keras.layers import Input,", "as nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X =", "from keras.optimizers 
import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array,", "conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides =", "'same')(c3) p3 = Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4 =", "from skimage.io import imread, imshow, concatenate_images from skimage.transform import resize", "skimage.transform import resize from sklearn.utils import class_weight from keras.callbacks import", "Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X", "(2,2,2) , strides = (2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4)", "# from medpy.io import load import numpy as np #import", "resize # from medpy.io import load import numpy as np", ", n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides =", "= 'same')(u10); c10 = Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 =", "concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers", "(2,2,2) , strides = (2,2,2) , padding = 'same' )(c1)", "(2,2,2) , padding = 'same' )(c1) c3 = conv_block(c2 ,", "padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True) #p6 =", "c10 = Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs", "import nibabel as nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm):", "BatchNormalization()(X) X = Activation('relu')(X) X = add([input_mat,X]); return X def", "(5,5,5),strides = (1,1,1) , padding = 'same')(u10); c10 = Dropout(dropout)(c10)", "c10 = add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1),", "= conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides", "import resize # from medpy.io import load import numpy as", "#%matplotlib 
inline import tensorflow as tf import keras.backend as K", "(2,2,2) , padding= 'same')(c7); u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True)", "p3 = Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2), padding", "Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c6) p7", "as plt #%matplotlib inline import tensorflow as tf import keras.backend", "Conv3D(n_filters*4,kernel_size = (2,2,2) , strides = (2,2,2), padding = 'same')(c3)", "n_filters = 8, dropout = 0.2, batch_norm = True): #c1", "import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger", "Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm: X =", "(1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size =", "tf import keras.backend as K from keras.utils import to_categorical from", "batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X = add([input_mat,X]);", "import metrics from keras.models import Model, load_model from keras.layers import", "padding='same')(p7); u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7)", "c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding=", "c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2) ,", "inline import tensorflow as tf import keras.backend as K from", "c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides =", "pd import numpy as np import matplotlib.pyplot as plt #%matplotlib", "as np import matplotlib.pyplot as plt #%matplotlib inline import tensorflow", "plt #%matplotlib inline import tensorflow as tf import keras.backend as", "= Dropout(dropout)(p3) c4 = conv_block(p3, n_filters*4,5,True) p4 = 
Conv3D(n_filters*8,kernel_size =", "(1,1,1) , padding = 'same')(u10); c10 = Dropout(dropout)(c10) c10 =", ", strides = (2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6", "keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import imread,", "= Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8); u9 = concatenate([u9,c3]);", "keras.models import Model, load_model from keras.layers import Input, BatchNormalization, Activation,", "load_img from skimage.io import imread, imshow, concatenate_images from skimage.transform import", "Dense, Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional", "Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1) , padding='same')(input_img) #c1", "pandas as pd import numpy as np import matplotlib.pyplot as", "Model, load_model from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum", "(2,2,2), strides=(2, 2, 2), padding='same')(p7); u6 = concatenate([u6,c5]); c7 =", ", padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8, (2,2,2), strides=(2,", "concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1) , padding", "import random import pandas as pd import numpy as np", "from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D,", "#c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2) , strides", "import os from skimage.io import imread, imshow, concatenate_images from skimage.transform", "BatchNormalization()(X) X = Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X", "matplotlib.pyplot as plt #%matplotlib inline import tensorflow as tf import", "import resize from sklearn.utils import class_weight from keras.callbacks 
import ModelCheckpoint", "strides = (2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 =", "= conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model = Model(inputs=input_img,", "(5,5,5) , strides = (1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img])", "import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add from keras.callbacks", "= (2,2,2) , strides = (2,2,2) , padding='same')(c6) p7 =", "keras.utils import to_categorical from keras import metrics from keras.models import", "c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides =", "resize from sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from", "keras.callbacks import ModelCheckpoint from keras.callbacks import CSVLogger from keras.callbacks import", "from keras.layers.advanced_activations import PReLU import os from skimage.io import imread,", "padding = 'same')(u10); c10 = Dropout(dropout)(c10) c10 = add([c10,u10]); #c9", "keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import", "'same' )(c1) c3 = conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size", "conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides =", "n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2)", "keras.backend as K from keras.utils import to_categorical from keras import", "keras.layers.advanced_activations import PReLU import os from skimage.io import imread, imshow,", "Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size = (2,2,2)", "= Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8);", "c5 = conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) ,", ", 
padding = 'same' )(c1) c3 = conv_block(c2 , n_filters*2,5,True)", "import matplotlib.pyplot as plt #%matplotlib inline import tensorflow as tf", "= (2,2,2) , strides = (2,2,2) , padding='same')(c5) p6 =", "import imread, imshow, concatenate_images from skimage.transform import resize from sklearn.utils", "from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from keras.layers.core", "u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides = (1,1,1)", "= (2,2,2) , strides = (2,2,2), padding = 'same')(c3) p3", "as np #import cv2 import nibabel as nib from PIL", "strides = (2,2,2) , padding='same')(c4) p4 = Dropout(dropout)(p4) c5 =", "Lambda, RepeatVector, Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling", "sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks import", "= concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 =", "= Conv3D(4, (1,1,1), activation='softmax')(c10) model = Model(inputs=input_img, outputs=outputs) return model", "Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c5) p6", "Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7); u8 = concatenate([u7,c4]); c8", "padding= 'same')(c7); u8 = concatenate([u7,c4]); c8 = conv_block(u8,n_filters*8,5,True) c8 =", "EarlyStopping from keras.layers.advanced_activations import PReLU import os from skimage.io import", "= (5,5,5),strides = (1,1,1) , padding = 'same')(u10); c10 =", "numpy as np #import cv2 import nibabel as nib from", "PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat) if batch_norm:", "strides = (1,1,1) , padding='same')(input_img) #c1 = add([c1,input_img]) c2 =", "= 
Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2) , padding", "from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import concatenate, add", "strides = (2,2,2), padding = 'same')(c3) p3 = Dropout(dropout)(p3) c4", "Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8); u9", "padding= 'same')(c9); u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size = (5,5,5),strides", "import tensorflow as tf import keras.backend as K from keras.utils", "(2,2,2), padding = 'same')(c3) p3 = Dropout(dropout)(p3) c4 = conv_block(p3,", "from medpy.io import load import numpy as np #import cv2", "from keras.callbacks import CSVLogger from keras.callbacks import EarlyStopping from keras.layers.advanced_activations", "import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from keras.preprocessing.image", "batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size =", "u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7); u8 =", "p4 = Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size", "strides = (2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 =", "Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4,", "strides=(2, 2, 2), padding='same')(p7); u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True)", "= conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2)", "c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1) ,", ", padding='same')(c4) p4 = Dropout(dropout)(p4) c5 = conv_block(p4, n_filters*8,5,True) p6", "Vnet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm = True):", ", padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 = conv_block(p5, 
n_filters*8,5,True) #p6", "np import matplotlib.pyplot as plt #%matplotlib inline import tensorflow as", "def Vnet_3d(input_img, n_filters = 8, dropout = 0.2, batch_norm =", "= (2,2,2) , strides = (2,2,2) , padding = 'same'", "2, 2), padding='same')(p7); u6 = concatenate([u6,c5]); c7 = conv_block(u6,n_filters*16,5,True) c7", "Conv3DTranspose(n_filters*8, (2,2,2), strides=(2, 2, 2), padding='same')(p7); u6 = concatenate([u6,c5]); c7", "u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding= 'same')(c8); u9 =", "numpy as np import matplotlib.pyplot as plt #%matplotlib inline import", "'same')(u10); c10 = Dropout(dropout)(c10) c10 = add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm)", "conv_block(u6,n_filters*16,5,True) c7 = Dropout(dropout)(c7) u7 = Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) ,", "= BatchNormalization()(X) X = Activation('relu')(X) X = add([input_mat,X]); return X", "#c1 = conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides", "conv_block(p4, n_filters*8,5,True) p6 = Conv3D(n_filters*16,kernel_size = (2,2,2) , strides =", "= (2,2,2), padding = 'same')(c3) p3 = Dropout(dropout)(p3) c4 =", "import EarlyStopping from keras.layers.advanced_activations import PReLU import os from skimage.io", "np #import cv2 import nibabel as nib from PIL import", "X = BatchNormalization()(X) X = Activation('relu')(X) X = add([input_mat,X]); return", "skimage.io import imread, imshow, concatenate_images from skimage.transform import resize from", ", padding='same')(input_img) #c1 = add([c1,input_img]) c2 = Conv3D(n_filters*2,kernel_size = (2,2,2)", "keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from", "p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides = (2,2,2) ,", "Dropout,Maximum from keras.layers.core import Lambda, RepeatVector, Reshape from keras.layers.convolutional 
import", "= (2,2,2) , padding= 'same')(c7); u8 = concatenate([u7,c4]); c8 =", "concatenate([u9,c3]); c9 = conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides", "add([c10,u10]); #c9 = conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model", ", padding = 'same')(u10); c10 = Dropout(dropout)(c10) c10 = add([c10,u10]);", "#c9 = conv_block(u9,n_filters,3,batch_norm) outputs = Conv3D(4, (1,1,1), activation='softmax')(c10) model =", "= conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2)", "= 0.2, batch_norm = True): #c1 = conv_block(input_img,n_filters,3,batch_norm) c1 =", "from keras.models import Model, load_model from keras.layers import Input, BatchNormalization,", "from keras.callbacks import EarlyStopping from keras.layers.advanced_activations import PReLU import os", "= (1,1,1) , padding = 'same')(u10); c10 = Dropout(dropout)(c10) c10", "= Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X = BatchNormalization()(X)", "= (5,5,5) , strides = (1,1,1) , padding='same')(input_img) #c1 =", "Conv3D(n_filters*2,kernel_size = (2,2,2) , strides = (2,2,2) , padding =", "from skimage.transform import resize # from medpy.io import load import", "(2,2,2) , strides = (2,2,2), padding = 'same')(c3) p3 =", "c4 = conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) ,", "from sklearn.utils import class_weight from keras.callbacks import ModelCheckpoint from keras.callbacks", "Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io", "nibabel as nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X", "= Conv3D(n_filters*16,kernel_size = (2,2,2) , strides = (2,2,2) , padding='same')(c5)", "to_categorical 
from keras import metrics from keras.models import Model, load_model", "= conv_block(input_img,n_filters,3,batch_norm) c1 = Conv3D(n_filters,kernel_size = (5,5,5) , strides =", "conv_block(u9,n_filters*4,5,True) c9 = Dropout(dropout)(c9) u10 = Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) ,", "add([input_mat,X]); return X def Vnet_3d(input_img, n_filters = 8, dropout =", "random import pandas as pd import numpy as np import", "from keras.layers.merge import concatenate, add from keras.callbacks import EarlyStopping, ModelCheckpoint,", "nib from PIL import Image def conv_block(input_mat,num_filters,kernel_size,batch_norm): X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(input_mat)", "c8 = conv_block(u8,n_filters*8,5,True) c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides =", "= conv_block(p3, n_filters*4,5,True) p4 = Conv3D(n_filters*8,kernel_size = (2,2,2) , strides", "= (2,2,2) , padding='same')(c6) p7 = conv_block(p6,n_filters*16,5,True) u6 = Conv3DTranspose(n_filters*8,", ", padding= 'same')(c9); u10 = concatenate([u10,c1]); c10 = Conv3D(n_filters*2,kernel_size =", "= Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size =", "c8 = Dropout(dropout)(c8) u9 = Conv3DTranspose(n_filters*2,(2,2,2),strides = (2,2,2) , padding=", "keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau from keras.optimizers import Adam from", "load_model from keras.layers import Input, BatchNormalization, Activation, Dense, Dropout,Maximum from", "= Conv3D(n_filters,kernel_size = (5,5,5) , strides = (1,1,1) , padding='same')(input_img)", "import Model, load_model from keras.layers import Input, BatchNormalization, Activation, Dense,", "= (2,2,2) , padding='same')(c5) p6 = Dropout(dropout)(p6) #c6 = conv_block(p5,", "X = BatchNormalization()(X) X = Activation('relu')(X) X = 
Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if", "Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D, GlobalMaxPool2D,MaxPooling3D from keras.layers.merge import", ")(c1) c3 = conv_block(c2 , n_filters*2,5,True) p3 = Conv3D(n_filters*4,kernel_size =", "p6 = Dropout(dropout)(p6) #c6 = conv_block(p5, n_filters*8,5,True) #p6 = Conv3D(n_filters*16,kernel_size", "batch_norm: X = BatchNormalization()(X) X = Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X)", "cv2 import nibabel as nib from PIL import Image def", "= Conv3DTranspose(n_filters*4,(2,2,2),strides = (2,2,2) , padding= 'same')(c7); u8 = concatenate([u7,c4]);", "X = Activation('relu')(X) X = Conv3D(num_filters,kernel_size=(kernel_size,kernel_size,kernel_size),strides=(1,1,1),padding='same')(X) if batch_norm: X =", "from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img from skimage.io import", "X = Activation('relu')(X) X = add([input_mat,X]); return X def Vnet_3d(input_img,", "Conv3DTranspose(n_filters,(2,2,2),strides = (2,2,2) , padding= 'same')(c9); u10 = concatenate([u10,c1]); c10", "keras.optimizers import Adam from keras.preprocessing.image import ImageDataGenerator, array_to_img, img_to_array, load_img", "Reshape from keras.layers.convolutional import Conv2D, Conv2DTranspose,Conv3D,Conv3DTranspose from keras.layers.pooling import MaxPooling2D,", "import to_categorical from keras import metrics from keras.models import Model," ]
[ "= None title: str = None instruction: str = None", "str = None img: str = None tag: str =", "= None instruction: str = None instruction_html: str = None", "import BaseModel # returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str", "ActiveOffer(BaseModel): id: str = None title: str = None instruction:", "from ..base import BaseModel # returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel):", "BaseModel # returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str =", "str = None title: str = None instruction: str =", "= None short_description: str = None description: str = None", "None img: str = None tag: str = None price:", "str = None tag: str = None price: int =", "instruction_html: str = None short_description: str = None description: str", "str = None instruction_html: str = None short_description: str =", "None short_description: str = None description: str = None img:", "https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str = None title: str =", "..base import BaseModel # returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id:", "None instruction_html: str = None short_description: str = None description:", "short_description: str = None description: str = None img: str", "returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str = None title:", "None title: str = None instruction: str = None instruction_html:", "= None instruction_html: str = None short_description: str = None", "# returned from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str = None", "id: str = None title: str = None instruction: str", "title: str = None instruction: str = None instruction_html: str", "img: str = None tag: str = None price: int", "= None img: str = None tag: str = None", "class ActiveOffer(BaseModel): id: str = None title: str = None", 
"= None description: str = None img: str = None", "= None tag: str = None price: int = None", "from https://vk.com/dev/account.getActiveOffers class ActiveOffer(BaseModel): id: str = None title: str", "str = None instruction: str = None instruction_html: str =", "description: str = None img: str = None tag: str", "None description: str = None img: str = None tag:", "str = None description: str = None img: str =", "None instruction: str = None instruction_html: str = None short_description:", "str = None short_description: str = None description: str =", "instruction: str = None instruction_html: str = None short_description: str" ]
[ "under The MIT License [see LICENSE for details] # Written", "1, biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c')", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu',", ".relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset',", "relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu') # .fc(n_classes*4, relu=False, name='bbox_pred'))", "# -------------------------------------------------------- import tensorflow as tf from .network import Network", "'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2, 2, biased=False, relu=False,", "'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False,", "relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False,", "1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3,", ".add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a')", "name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data') #", "name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True,", "name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "Copyright (c) 2016 # Licensed under The MIT License [see", 
"name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True,", "256, 1, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3,", "relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1,", "1, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256,", "# .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False,", "<reponame>yangxue0827/TF_Deformable_Net # -------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c)", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False,", "# 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1, 512,", "1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c')", "1, biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b')", "3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False,", "# .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu') # .fc(n_classes*4,", "relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # 
(self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling')", "padding='VALID',name='pool1') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False))", "relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False,", "1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1,", "relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False,", "name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1,", ".batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256, 1,", "def setup(self): n_classes = cfg.NCLASSES # anchor_scales = [8, 16,", ".conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data')", "* 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1,", "name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3,", "(self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) #", "64, 1, 1, biased=False, 
relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3,", "relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3, 2, 2, padding='VALID',name='pool1')", "name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128, 1,", "Licensed under The MIT License [see LICENSE for details] #", "biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", "1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1,", "'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4,", "name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128, 1,", "relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False,", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True,", "3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False,", "# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') #", ".batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2,", "tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': 
self.dontcare_areas})", "self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4],", "# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b') #", "1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1,", "1, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3, 64,", "License [see LICENSE for details] # Written by miraclebiu #", "biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1,", "1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c')", "name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes,", "biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1,", "3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1,", "(self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128, 1, 1, biased=False,", "1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset')", "name='res2a_branch1') 
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64, 1, 1, biased=False, relu=False,", ".relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID')", "-------------------------------------------------------- import tensorflow as tf from .network import Network from", "72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') )", ".batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c')", "3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1,", ".add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu'))", ".relu(name='res2a_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True,", "name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c',", ".add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a',", "relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1,", ".batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') #", "256, 1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b')", ".relu(name='res3a_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True,", "biased=False, relu=False, 
name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1, 1,", "1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu',", "details] # Written by miraclebiu # -------------------------------------------------------- import tensorflow as", "# .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False,", "= tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data') self.im_info = tf.placeholder(tf.float32,", ".batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b')", "biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", "1, biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False)", ".fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2')", "relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", "= [8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16,", "self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable", "name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False,", 
"relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c')", "relu = False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride,", "#pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu') #", "relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128,", "1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1,", ".batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b')", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False)", ".batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b')", "# .flatten_data(name='offset_flatten') .fc(num_out=7 * 7 * 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7),", ".softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') #", ".batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c')", "biased=False, relu=False, name='res4d_branch2c') 
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1,", "1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu')", "# 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score')", "(self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred')) #=========", "name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c')", "2048, 1, 1, biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling')", "(self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 1, 256, 1, 1, biased=False,", "# Licensed under The MIT License [see LICENSE for details]", "biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64, 1, 1,", "self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None],", "1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1,", "[16, ] (self.feed('data') .conv(7, 7, 64, 2, 2, relu=False, name='conv1')", "1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1,", "relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 
'res5c_branch2b_offset') .deform_conv(3, 3, 512,", "1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3,", "shape=[None, 4], name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info,", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False))", "relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1,", ".conv(7, 7, 64, 2, 2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False)", "relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c')", "= tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard')", "name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False,", "name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True,", "1, 1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024,", "1, 1, biased=False, relu=False, name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1,", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False,", "name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False))", "name='res5c_branch2b') .batch_normalization(relu=False, 
name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False,", "relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1,", "= 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard',", "1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1,", ".relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1,", "name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256, 1,", "1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1,", "'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False,", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu',", "miraclebiu # -------------------------------------------------------- import tensorflow as tf from .network import", "1, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256,", ".flatten_data(name='offset_flatten') .fc(num_out=7 * 7 * 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape'))", "name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes',", 
"relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64, 1, 1, biased=False,", "1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1,", "biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN", "rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred'))", "= dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable =", "name='rpn_bbox_pred')) #========= RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape')", "2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1')", "1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1,", "shape=[None, None, None, 3], name='data') self.im_info = tf.placeholder(tf.float32, shape=[None, 3],", "= 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN", "3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False)", ".batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False,", "biased=True, 
rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3,", "-------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c) 2016 #", "1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1,", "= tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5],", ".add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a')", "512, 1, 1, biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) #", "3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1,", "128, 1, 1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1,", "1, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3, 128,", "name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1,", "TFFRCNN - Resnet50 # Copyright (c) 2016 # Licensed under", "1, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024,", "rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3, 3,", ".fc(num_out=7 * 7 * 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu',", "= tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') 
self.keep_prob = tf.placeholder(tf.float32) self.layers =", "1, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256,", ".batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3')", "name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1, 1,", "# (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu') # .conv(1,", "for details] # Written by miraclebiu # -------------------------------------------------------- import tensorflow", "'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset')", "3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1,", "'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1')", "relu=False)) # (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2,", "anchor_scales = [8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride =", ".batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b')", ".conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') #", "'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #=========", 
"1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1'))", "(self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1,", "1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False)", "None, None, 3], name='data') self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info')", "256, 1, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1,", "'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable = trainable self.setup()", "relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128, 2, 2,", "biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", "1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu')", "name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu')", "1, biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3,", "biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1,", "# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a') #", "# 
.batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b')", "# .relu(name='res5a_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "# (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2, 2,", "1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1,", ".add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a')", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False)", "'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False,", "pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1')", "name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1,", "(self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1, 512, 1,", "biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', #", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False)", ".spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois'))", ".conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, 
name='rpn_cls_score')) (self.feed('rpn_cls_score',", ".relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1,", "1, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3, 256,", "1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1,", "LICENSE for details] # Written by miraclebiu # -------------------------------------------------------- import", "name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False,", "64, 2, 2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3,", "1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu')", "- Resnet50 # Copyright (c) 2016 # Licensed under The", "# .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False,", "1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c')", "spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False)", ".deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b')", "= trainable self.setup() def setup(self): n_classes = cfg.NCLASSES # anchor_scales", "(self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') 
.deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False,", "name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False))", "1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1,", "relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============", "name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c')", "1, padding='VALID', relu = False, name='rpn_bbox_pred')) #========= RoI Proposal ============", "'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales,", "1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3,", "biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1,", ".relu(name='res5a_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID')", "2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace()", "1, 2048, 1, 1, 
biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) #", ".batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024, 2, 2,", "(self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False,", "# -------------------------------------------------------- # TFFRCNN - Resnet50 # Copyright (c) 2016", "biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", "self.dontcare_areas}) self.trainable = trainable self.setup() def setup(self): n_classes = cfg.NCLASSES", "name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False,", "1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu')", "1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu',", "relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7,", "1024, 1, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d')", "(self.feed('res2c_relu') .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID')", "3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1,", "biased=False, relu=False, 
name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64, 1, 1,", "padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256, 2, 2, biased=False, relu=False,", ".batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c')", ".relu(name='res4c_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True,", "relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "cfg class Resnet50_train(Network): def __init__(self, trainable=True): self.inputs = [] self.data", "initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2,", ".max_pool(3, 3, 2, 2, padding='VALID',name='pool1') .conv(1, 1, 256, 1, 1,", "1, biased=False, relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a')", "'res5c_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4,", "= 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride,", ".batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False))", "(self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, 
anchor_scales, name = 'rpn-data'", ".batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64,", "= tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas':", "self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False)", "512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False)", "padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512, 1, 1, biased=False,", "# .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu')", "# Copyright (c) 2016 # Licensed under The MIT License", "pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten')", "2, 2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3, 2,", "1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1,", "'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable = trainable self.setup() def setup(self):", "name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False,", "3, 72, 1, 1, biased=True, 
rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros'))", "name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False,", "'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "(self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1, 1, biased=False,", ".relu(name='res4b_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True,", "#pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') #", "(self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name =", "relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False,", ".conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False, name='rpn_bbox_pred')) #========= RoI", "relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256, 2, 2,", "'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625,", "name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c')", "1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu')", "= False, name='rpn_bbox_pred')) #========= RoI Proposal ============ 
(self.feed('rpn_cls_score') .spatial_reshape_layer(2, name", "1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #=========", ".batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c')", "# .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob'))", "name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512,", "'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool')", "32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data') .conv(7,", "name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu')", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1',", "name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a',", "(self.feed('res4f_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID')", "(self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name", "3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) 
.conv(1,", "RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob')", "'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas')", "] (self.feed('data') .conv(7, 7, 64, 2, 2, relu=False, name='conv1') .batch_normalization(relu=True,", ".conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True,", "relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1,", ".add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1, 512, 1, 1, biased=False,", "3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1,", "1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c')", "'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False,", "tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data':self.data,", "name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False,", "2, biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 
256,", "512, 1, 1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b')", ".add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a')", "1, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256,", "initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False,", "name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b')", "num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1, 1,", ".batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a',", "relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1,", "relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False,", "biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1,", "1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True,", "# .add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1, 512, 1, 1,", "trainable self.setup() def setup(self): n_classes = cfg.NCLASSES # anchor_scales =", ".add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1',", 
"name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 1, 256, 1,", "1024, 1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a')", "1, 1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1,", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False))", "256, 1, 1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1,", "'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t')", "relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c') #", "(self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256, 1, 1, relu=False,", ".fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred',", "self.inputs = [] self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3],", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu',", "(self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) 
(self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name =", "1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1,", "512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1,", "name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1, 1,", "1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1,", "(self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2, 2, biased=False,", "1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1,", ".conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True,", "1, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024,", ".batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b')", "name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64, 1,", 
".batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1, 1,", "name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False))", "cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data') .conv(7, 7, 64, 2,", ".batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a')", "256, 1, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3,", "72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu',", "name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4,", "name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1,", "part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2')", "biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1, 1,", "name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False,", ".relu(name='res4e_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True,", "1, biased=False, relu=False, name='res5a_branch2b') # 
.batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1,", "as tf from .network import Network from ..fast_rcnn.config import cfg", "num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1, 1,", "# Written by miraclebiu # -------------------------------------------------------- import tensorflow as tf", "name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True,", ".batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False,", "relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048, 1,", "biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1, 1,", ".relu(name='res3b_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True,", "256, 1, 1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1,", "'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "padding='VALID', relu = False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info')", "biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', #", "4], name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\", "dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': 
self.dontcare_areas}) self.trainable = trainable", "by miraclebiu # -------------------------------------------------------- import tensorflow as tf from .network", ".conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False))", "RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu", "128, 1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1,", ".conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME',", "2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu',", "biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1,", "1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1,", "2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3,", ".batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1,", ".conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False)", ".batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) 
(self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128, 1, 1,", "name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1, 1,", "relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64,", "relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", "padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128, 2, 2, biased=False, relu=False,", "rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu',", "self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable = trainable self.setup() def setup(self): n_classes", "1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3,", "biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048,", ".batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c')", "'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4,", "__init__(self, trainable=True): self.inputs = [] self.data = tf.placeholder(tf.float32, shape=[None, None,", "512, 1, 1, biased=False, relu=False, 
name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) #", "2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3,", "padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True,", "1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1,", "biased=False, relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu')", "name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False,", ".batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b')", "name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False))", "name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True,", "name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1, 1,", "relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", ".batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "512, 1, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) 
(self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c')", "1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4,", "(self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1, 1, biased=False,", "1, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu')", "1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu')", "biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1,", ".relu(name='res3d_relu') .conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID')", "relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1,", "'dontcare_areas': self.dontcare_areas}) self.trainable = trainable self.setup() def setup(self): n_classes =", "1, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128,", "relu=False, name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1,", "# .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False,", "'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "Written by miraclebiu # -------------------------------------------------------- import tensorflow as tf from", "relu=False, name='res4d_branch2c') 
.batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256,", "name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256, 1,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu',", "# .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c')", "name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True,", "relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1,", ".batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256, 1, 1,", ".conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True,", "import Network from ..fast_rcnn.config import cfg class Resnet50_train(Network): def __init__(self,", "64, 1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3,", "512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b')", "# .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2, 2, biased=False, relu=False,", "1, 1, biased=False, relu=False, name='res5c_branch2a') # 
.batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3,", "1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) #", "relu = False, name='rpn_bbox_pred')) #========= RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2,", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu',", ".batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c')", "(c) 2016 # Licensed under The MIT License [see LICENSE", "tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas", "= cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data') .conv(7, 7, 64,", "1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1,", "name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32)", "1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c')", "..fast_rcnn.config import cfg class Resnet50_train(Network): def __init__(self, trainable=True): self.inputs =", "relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1,", "padding='VALID', relu = False, name='rpn_bbox_pred')) #========= RoI Proposal ============ (self.feed('rpn_cls_score')", "shape=[None, 3], 
name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard", "tensorflow as tf from .network import Network from ..fast_rcnn.config import", ") (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2,", "name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False))", "name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1,", "trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 * 7 * 2,", "'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') #", "1, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu')", "name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False))", "1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256,", ".batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b')", "(self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN ============ (self.feed('res4f_relu')", "name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True,", "padding='VALID') .batch_normalization(relu=False, 
name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512,", ".network import Network from ..fast_rcnn.config import cfg class Resnet50_train(Network): def", ".reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,", "16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data')", "padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu') #", "name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c')", "from ..fast_rcnn.config import cfg class Resnet50_train(Network): def __init__(self, trainable=True): self.inputs", "biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1,", "biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512,", "(self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False,", "#========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID',", "False, name='rpn_bbox_pred')) #========= RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name =", "relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1,", "256, 1, 1, biased=False, relu=False, name='res2a_branch1') 
.batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1,", "biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1,", "(self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1,", ".conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False)", "# .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False,", "initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2,", "_feat_stride = [16, ] (self.feed('data') .conv(7, 7, 64, 2, 2,", "# #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu')", "relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False,", ".relu(name='res2c_relu') .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID')", "1, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu')", "512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b')", ".relu(name='res4d_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True,", ".relu(name='res4a_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True,", 
"name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False,", "'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN ============", "1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3,", ".conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME',", "128, 1, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3, 3,", "biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128, 1,", "# anchor_scales = [8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1',", "1, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128,", "64, 1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1,", "1024, 1, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e')", "3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros'))", ".relu(name='res5b_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID')", 
"1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1,", "padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False,", "5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32,", ".add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024, 2, 2, biased=False, relu=False, name='res4a_branch1',", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True,", "biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", "spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 * 7 * 2, name='offset',", ".add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a')", "name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32) self.layers = dict({'data':self.data, 'im_info':self.im_info, 'gt_boxes':self.gt_boxes,\\ 'gt_ishard':", ".relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID')", ".add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a')", ".fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob')) (self.feed('fc_new_2') .fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data')", ".spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info') .proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name", "relu=False, name='res3d_branch2a') .batch_normalization(relu=True, 
name='bn3d_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False,", "Network from ..fast_rcnn.config import cfg class Resnet50_train(Network): def __init__(self, trainable=True):", "relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False,", ".batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1, 1,", "#========= RoI Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob'))", "# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5c_branch2b') #", "name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name =", "(self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625,", "relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256,", "= cfg.NCLASSES # anchor_scales = [8, 16, 32] anchor_scales =", ".add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 ,", "class Resnet50_train(Network): def __init__(self, trainable=True): self.inputs = [] self.data =", "relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64,", "biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, 
name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1, 1,", "'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256,", ".deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b')", "relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c')", "1, biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3,", "# .relu(name='res5b_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1,", "trainable=True): self.inputs = [] self.data = tf.placeholder(tf.float32, shape=[None, None, None,", "relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False,", "relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1,", "import tensorflow as tf from .network import Network from ..fast_rcnn.config", ".anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' )) # Loss of rpn_cls", "part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 * 7", "name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 
2048, 1, 1,", "name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False))", "1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1,", "= [16, ] (self.feed('data') .conv(7, 7, 64, 2, 2, relu=False,", "1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c')", "tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes')", "relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1,", "biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu')", "padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False,", ".fc(num_out=4*n_classes, name='bbox_pred', relu=False)) # (self.feed('res4f_relu','roi-data') # .roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1,", ".batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c')", "relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256,", ",1 , 1, padding='VALID', relu = False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes',", "& rpn_boxes (self.feed('rpn_conv/3x3') 
.conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu = False,", "biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1, 1,", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False)", ".batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a',", "# .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID')", "1, 256, 1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c')", "biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1,", "#========= RCNN ============ (self.feed('res4f_relu') .conv(1, 1, 2048, 1, 1, biased=False,", "# .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b') #", "= False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales,", "(self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2, 2, biased=False,", "1, 1024, 1, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c')", "(self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1, 1, biased=False,", "tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') self.keep_prob", "1, 512, 2, 2, 
biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu')", "= tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas')", "128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3,", ".conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False)", "padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True,", "no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2'))", "'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' )) #", "64, 1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3,", ".deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b')", "(self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256, 1, 1, biased=False,", ".roi_pool(7,7,1.0/16,name='res5a_branch2a_roipooling') # .conv(1, 1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a',", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False)", "name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape')) (self.feed('rpn_cls_prob_reshape','rpn_bbox_pred','im_info')", "1, 1, biased=False, 
relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256,", "rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512,", "1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1,", "relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "1, 1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu'))", "num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1, 1,", "relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", "name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "self.trainable = trainable self.setup() def setup(self): n_classes = cfg.NCLASSES #", ".conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False)", "1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1,", "biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1,", "biased=False, relu=False, name='res4c_branch2b') 
.batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", "relu=False, name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') #", "1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False)", "name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1,", "name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False,", ".batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b')", "1, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024,", "name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True,", "biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 1,", "rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048,", "1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1,", "self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None,", ".conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3d_branch2b') 
.batch_normalization(relu=True, name='bn3d_branch2b',is_training=False)", "256, 1, 1, biased=False, relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3,", ".batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2,", "2, biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128,", "# .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') #", "name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3, 3, 512, 1,", "padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c') .add(name='res5c') .relu(name='res5c_relu') .conv(1, 1, 256,", ".batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256, 1, 1,", "(self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1')", "biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", "# .add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1, 512, 1, 1,", "= 'roi-data')) #========= RCNN ============ (self.feed('res4f_relu') .conv(1, 1, 2048, 1,", "1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1,", ".conv(1, 1, 1024, 2, 2, biased=False, 
relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False))", "1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1,", "name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b')", "1024, 1, 1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b')", "1, biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3,", "1, biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3,", "rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu =", ".deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') #", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu',", "name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 * 7 * 2, name='offset', relu=False)", "7, 64, 2, 2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3,", "None, 3], name='data') self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes", "1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a')", "# .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # 
.relu(name='res5a_relu') # .conv(1,", ".relu(name='res5b_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a')", "1, 1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu')", "[] self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data') self.im_info", "(self.feed('data') .conv(7, 7, 64, 2, 2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1',", "(self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128, 1, 1, biased=False,", "1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3,", "1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c')", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu',", "(self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256, 1, 1, biased=False,", "1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1,", "tf from .network import Network from ..fast_rcnn.config import cfg class", ".add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a')", "* 7 * 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data',", "name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, 
part_size=7, output_dim=256,", ".batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False,", "2, relu=False, name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3, 2, 2,", "1, biased=False, relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1,", ".batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2, 2,", "1024, 1, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c')", "3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros')", "self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') self.keep_prob = tf.placeholder(tf.float32) self.layers", "1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c')", "# (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # .batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) #", "# .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False,", "biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", "256, 1, 1, biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1,", "3, 512, 1, 1, biased=False, relu=False, 
name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False)", "relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1,", "(self.feed('res4f_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID')", "name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False,", ".batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b')", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False,", "1, 1, biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') #", ".batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c')", "1, biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False,", "2048, 1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu')", "shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None, 4], name='dontcare_areas') self.keep_prob =", "1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu',", "name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "biased=False, relu=False, 
name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3,", "1, 1, biased=False, relu=False, name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu')", "(self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False,", "1, 1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1,", "name='conv1') .batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3, 2, 2, padding='VALID',name='pool1') .conv(1,", "name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False,", ".batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1, 256, 1, 1,", "128, 1, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3,", "name='res4e_branch2b') .batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "128, 1, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1,", "= [] self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data')", "relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1',", "name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3b_branch2c') 
.batch_normalization(name='bn3b_branch2c',is_training=False,relu=False))", "n_classes = cfg.NCLASSES # anchor_scales = [8, 16, 32] anchor_scales", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False)", "3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1,", "'rpn-data' )) # Loss of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4,", ".relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset',", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False,", "1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3,", ".batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1,", "1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1,", ".relu(name='res5c_relu') .conv(1, 1, 256, 1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu',", ".batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "biased=False, relu=False, name='res5c_branch2a') # .batch_normalization(relu=True, name='bn5c_branch2a',is_training=False) # .conv(3, 3, 512,", "relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') 
.relu(name='res4c_relu') .conv(1, 1, 256,", "2, 2, biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1,", "name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False))", "1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3,", "'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' )) # Loss", ".proposal_layer(_feat_stride, anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name =", "(self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False, rate=2, relu=False,", "256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3,", "3, 128, 1, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1,", "biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", "1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c'))", "name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu', 'res5a_branch2b_offset') .deform_conv(3, 3, 512, 1, 1,", "name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32,", "1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, 
name='bn4b_branch2a',is_training=False) .conv(3,", "'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN ============ (self.feed('res4f_relu') .conv(1,", ".conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False)", "256, 1, 1, biased=False, relu=False, name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1,", "'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' ))", "512, 1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a')", "1, 1, biased=False, relu=False, name='res4a_branch2c') .batch_normalization(name='bn4a_branch2c',is_training=False,relu=False)) (self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu')", "2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) #", "'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False,", "3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1,", "256, 1, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3,", "7 * 2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape')", "relu=False, name='res3c_branch2b') .batch_normalization(relu=True, 
name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False,", ".batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1,", "1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64,", "anchor_scales, 'TRAIN',name = 'rpn_rois')) (self.feed('rpn_rois','gt_boxes', 'gt_ishard', 'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data'))", "name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score', relu=False) .softmax(name='cls_prob'))", ".fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu') # .fc(n_classes*4, relu=False,", "64, 1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1,", "1, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1, 512,", "name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False))", "MIT License [see LICENSE for details] # Written by miraclebiu", "name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False))", "2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace()", "1, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') 
.relu(name='res4e_relu')", "2016 # Licensed under The MIT License [see LICENSE for", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False))", "name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True,", "3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b') .batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1,", "2, biased=False, relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256,", "padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5b_relu', # 'bn5c_branch2c') #", ".conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3c_branch2a') .batch_normalization(relu=True, name='bn3c_branch2a',is_training=False)", "1, 1, biased=False, relu=False, name='res4c_branch2b') .batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024,", ".batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1,", ".add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a')", "name='bn5c_branch2b') .relu(name='res5c_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c',", ".conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False,", "2, name='offset', relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 
'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7,", "padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1,", ".conv(3, 3, 128, 1, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False)", "1, 256, 1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1,", ".relu(name='res5a_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID')", "padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True,", "biased=False, relu=False, name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1,", ".batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2,", "(self.feed('res3d_relu') .conv(1, 1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID')", "relu=False, name='res4f_branch2b') .batch_normalization(relu=True, name='bn4f_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False,", "self.setup() def setup(self): n_classes = cfg.NCLASSES # anchor_scales = [8,", "is_training=False) .max_pool(3, 3, 2, 2, padding='VALID',name='pool1') .conv(1, 1, 256, 1,", ".conv(1, 1, 128, 2, 2, biased=False, relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True,", ".batch_normalization(relu=True, name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c')", "# .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') #", "shape=[None, 5], name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas =", "Loss of rpn_cls & rpn_boxes 
(self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID',", "relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False,", "(self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f') .relu(name='res4f_relu')) #========= RPN ============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2", "name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False,", "1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1,", "128, 1, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3,", "2048, 1, 1, biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu',", "512, 1, 1, biased=False, relu=False, name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) #", "# .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a') #", "relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512, 1,", "name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False,", "512, 1, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d')", "64, 1, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1,", "1, 1, biased=False, relu=False, name='res4b_branch2b') 
.batch_normalization(relu=True, name='bn4b_branch2b',is_training=False) .conv(1, 1, 1024,", ".proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN ============ (self.feed('res4f_relu') .conv(1, 1, 2048,", ".conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME',", ".batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4c_branch2b')", "= 'rpn-data' )) # Loss of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3')", "RCNN ============ (self.feed('res4f_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3,", "'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False,", ".conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu = False, name='rpn_cls_score'))", "padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3, 3, 512, 1, 1,", "cfg.NCLASSES # anchor_scales = [8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES", "name='res2b_branch2c') .batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1,", "name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256, 1,", "relu=False, name='res3a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn3a_branch2a',is_training=False) .conv(3, 3, 128, 1, 1,", ".conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False)", "1, 1, 
biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3,", "biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1,", "anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16, ] (self.feed('data') .conv(7, 7,", "512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu')", "3], name='data') self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes =", "1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c')", "biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1, 1, 256, 1, 1,", "name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512, 2,", "1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1,", ".spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name = 'rpn_cls_prob_reshape'))", ".batch_normalization(relu=True, name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c')", "(self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024, 2, 2, biased=False,", "512, 1, 1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) #", "'bn5b_branch2c') # 
.add(name='res5b') # .relu(name='res5b_relu') # .conv(1, 1, 512, 1,", "1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu')", "trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes, name='cls_score',", "(self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1, 1, biased=False,", ".add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a',", "3, 512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False,", "name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1,", ".add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) #", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu',", "3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False) .conv(1,", "padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512, 1, 1, biased=False,", "512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu')", "The MIT License [see LICENSE for details] # Written by", "name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False,", "1, biased=False, relu=False, name='res2b_branch2c') 
.batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1,", "name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4f_branch2b') .batch_normalization(relu=True,", "biased=False, relu=False, name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False,", "'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name = 'rpn-data' )) # Loss of", "1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a')", "import cfg class Resnet50_train(Network): def __init__(self, trainable=True): self.inputs = []", "name='res4d_branch2b') .batch_normalization(relu=True, name='bn4d_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "biased=False, relu=False, name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048,", ".conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu',", "1, 256, 1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c')", ".batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1, 1, 256, 1, 1,", "biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128, 2,", ".batch_normalization(relu=True, name='bn3c_branch2a',is_training=False) .conv(3, 3, 128, 1, 1, biased=False, 
relu=False, name='res3c_branch2b')", "1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3,", "name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') #", "setup(self): n_classes = cfg.NCLASSES # anchor_scales = [8, 16, 32]", ".batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c')", "name='gt_boxes') self.gt_ishard = tf.placeholder(tf.int32, shape=[None], name='gt_ishard') self.dontcare_areas = tf.placeholder(tf.float32, shape=[None,", "1, 1, biased=False, relu=False, name='res4d_branch2c') .batch_normalization(name='bn4d_branch2c',is_training=False,relu=False)) (self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu')", "(self.feed('pool1') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True,", "name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False,", "1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c')) (self.feed('res5b_relu', 'bn5c_branch2c')", "# .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') #", "1, biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1,", "name='res5a_branch2b') # .batch_normalization(relu=True, name='bn5a_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1,", "1024, 1, 1, biased=False, 
relu=False, name='res4f_branch2c') .batch_normalization(name='bn4f_branch2c',is_training=False,relu=False)) (self.feed('res4e_relu', 'bn4f_branch2c') .add(name='res4f')", "(self.feed('bn4a_branch1', 'bn4a_branch2c') .add(name='res4a') .relu(name='res4a_relu') .conv(1, 1, 256, 1, 1, biased=False,", "biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) # .conv(1, 1, 2048,", "padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512,", "============ (self.feed('res4f_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5a_branch1',", "name='data') self.im_info = tf.placeholder(tf.float32, shape=[None, 3], name='im_info') self.gt_boxes = tf.placeholder(tf.float32,", "[8, 16, 32] anchor_scales = cfg.ANCHOR_SCALES _feat_stride = [16, ]", ".conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False)", "biased=False, relu=False, name='res5b_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu')", "name='res5a_branch2c') # .batch_normalization(name='bn5a_branch2c',is_training=False,relu=False)) # (self.feed('res5a_branch2a_roipooling') # .conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID')", ".conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False)", "name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256, 2, 2, biased=False,", "name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7, 
sample_per_part=4, no_trans=True, part_size=7, output_dim=256,", "from .network import Network from ..fast_rcnn.config import cfg class Resnet50_train(Network):", "biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1,", "sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024,", "512, 1, 1, biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu')", "'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024, 2, 2, biased=False, relu=False,", "self.data = tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data') self.im_info =", "256, 1, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3,", "biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1, 1,", "name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False, name='res5b_branch2c',", "1, 512, 1, 1, biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a')", "Resnet50_train(Network): def __init__(self, trainable=True): self.inputs = [] self.data = tf.placeholder(tf.float32,", "72, 1, 1, biased=True, rate=2, relu=False, name='res5a_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5a_branch2a_relu',", "1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3,", "biased=False, relu=False, name='res4a_branch1', padding='VALID') 
.batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1, 256, 2,", ".batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1,", ", 1, padding='VALID', relu = False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard',", "Resnet50 # Copyright (c) 2016 # Licensed under The MIT", "biased=False, relu=False, name='res5c_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2a') .relu(name='res5c_branch2a_relu') .conv(3, 3, 72,", "1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False)) (self.feed('res4b_relu', 'bn4c_branch2c') .add(name='res4c') .relu(name='res4c_relu') .conv(1,", "128, 1, 1, biased=False, relu=False, name='res3c_branch2b') .batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1,", "'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False,", ".batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1, 1,", "biased=True, rate=2, relu=False, name='res5c_branch2b_offset', padding='SAME', initializer='zeros') ) (self.feed('res5c_branch2a_relu', 'res5c_branch2b_offset') .deform_conv(3,", "256, 1, 1, biased=False, relu=False, name='res2a_branch2c') .batch_normalization(name='bn2a_branch2c',is_training=False,relu=False)) (self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a')", "of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1, padding='VALID', relu", "1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) #", "name='bn2c_branch2a',is_training=False) 
.conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2c_branch2b') .batch_normalization(relu=True,", "1, 2048, 1, 1, biased=False, relu=False, name='res5c_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5c_branch2c'))", ".batch_normalization(name='bn2b_branch2c',is_training=False,relu=False)) (self.feed('res2a_relu', 'bn2b_branch2c') .add(name='res2b') .relu(name='res2b_relu') .conv(1, 1, 64, 1, 1,", ".conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False)", "name='bn4c_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4c_branch2c') .batch_normalization(name='bn4c_branch2c',is_training=False,relu=False))", "biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256, 1, 1,", "'roi-data')) #========= RCNN ============ (self.feed('res4f_relu') .conv(1, 1, 2048, 1, 1,", "name='res2c_branch2b') .batch_normalization(relu=True, name='bn2c_branch2b',is_training=False) .conv(1, 1, 256, 1, 1, biased=False, relu=False,", "relu=False, name='res4f_branch2a') .batch_normalization(relu=True, name='bn4f_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", "output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 * 7 *", "1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a') # .batch_normalization(relu=True, name='bn5b_branch2a',is_training=False)", "name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1, 1, biased=False, relu=False,", ".batch_normalization(name='bn4e_branch2c',is_training=False,relu=False)) (self.feed('res4d_relu', 'bn4e_branch2c') .add(name='res4e') .relu(name='res4e_relu') .conv(1, 1, 256, 1, 1,", "256, 1, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True, 
name='bn4e_branch2b',is_training=False) .conv(1, 1,", "(self.feed('bn2a_branch1', 'bn2a_branch2c') .add(name='res2a') .relu(name='res2a_relu') .conv(1, 1, 64, 1, 1, biased=False,", ".add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1, 512, 1, 1, biased=False,", "no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7 *", "'gt_boxes':self.gt_boxes,\\ 'gt_ishard': self.gt_ishard, 'dontcare_areas': self.dontcare_areas}) self.trainable = trainable self.setup() def", "2, padding='VALID',name='pool1') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res2a_branch1')", ".batch_normalization(relu=True, name='bn_conv1', is_training=False) .max_pool(3, 3, 2, 2, padding='VALID',name='pool1') .conv(1, 1,", ".relu(name='res2b_relu') .conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True,", "biased=False, relu=False, name='res4b_branch2c') .batch_normalization(name='bn4b_branch2c',is_training=False,relu=False)) (self.feed('res4a_relu', 'bn4b_branch2c') .add(name='res4b') .relu(name='res4b_relu') .conv(1, 1,", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False)", "1, biased=False, relu=False, name='res4d_branch2a') .batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1,", "# Loss of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1, 1,", "name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1, 1, 512, 1, 1,", "biased=False, relu=False, name='res5b_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5b_branch2a') .relu(name='res5b_branch2a_relu') .conv(3, 3, 72,", ".conv(1,1,2048,2,2,biased=False, relu=False, name='res5a_branch1', padding='VALID') # 
.batch_normalization(name='bn5a_branch1',is_training=False,relu=False)) # (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a')", "# (self.feed('res5b_relu', # 'bn5c_branch2c') # .add(name='res5c') # .relu(name='res5c_relu') # .fc(n_classes,", ".add(name='res4c') .relu(name='res4c_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4d_branch2a')", "2, biased=False, relu=False, name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128,", "(self.feed('res4c_relu', 'bn4d_branch2c') .add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256, 1, 1, biased=False,", "name='res3a_branch1', padding='VALID') .batch_normalization(name='bn3a_branch1',is_training=False,relu=False)) (self.feed('res2c_relu') .conv(1, 1, 128, 2, 2, biased=False,", ".relu(name='res5c_branch2a_relu') .conv(3, 3, 72, 1, 1, biased=True, rate=2, relu=False, name='res5c_branch2b_offset',", "biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3, 3, 128, 1, 1,", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False,", "1, 64, 1, 1, biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True, name='bn2b_branch2a',is_training=False) .conv(3,", "biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512,", "1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5b_branch2b') .batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1,", "name='bn_conv1', is_training=False) .max_pool(3, 3, 2, 2, padding='VALID',name='pool1') .conv(1, 1, 256,", "1, biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') 
.relu(name='res5a_branch2a_relu') .conv(3, 3,", "rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset') .deform_conv(3, 3, 512,", "1, 1, padding='VALID', relu = False, name='rpn_bbox_pred')) #========= RoI Proposal", "1, 1, biased=False, relu=False, name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1,", "============ (self.feed('res4f_relu') .conv(3,3,512,1,1,name='rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*2 ,1 , 1, padding='VALID', relu =", "============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2, name", "name='bn5b_branch2a',is_training=False) # .conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b')", "Proposal ============ (self.feed('rpn_cls_score') .spatial_reshape_layer(2, name = 'rpn_cls_score_reshape') .spatial_softmax(name='rpn_cls_prob')) (self.feed('rpn_cls_prob') .spatial_reshape_layer(len(anchor_scales)*3*2,", "1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True, name='bn3d_branch2a',is_training=False) .conv(3,", "3, 64, 1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1,", "name='bn4e_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4e_branch2c') .batch_normalization(name='bn4e_branch2c',is_training=False,relu=False))", "512, 1, 1, biased=False, relu=False, name='res5c_branch2b') # .batch_normalization(relu=True, name='bn5c_branch2b',is_training=False) #", "def __init__(self, trainable=True): self.inputs = [] self.data = tf.placeholder(tf.float32, shape=[None,", "anchor_scales, name = 'rpn-data' )) # Loss of rpn_cls &", "1, 2048, 1, 1, biased=False, 
relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c'))", "relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True, name='bn5a_branch2a',is_training=False) # .conv(3, 3, 512,", "sample_per_part=4, no_trans=True, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='offset_t') # .flatten_data(name='offset_flatten') .fc(num_out=7", "3], name='im_info') self.gt_boxes = tf.placeholder(tf.float32, shape=[None, 5], name='gt_boxes') self.gt_ishard =", "1, 256, 2, 2, biased=False, relu=False, name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False)", "3, 512, 1, 1, biased=False, relu=False, name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False)", ".conv(1, 1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False)", "256, 1, 1, biased=False, relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c')", "relu=False, name='res3b_branch2c') .batch_normalization(name='bn3b_branch2c',is_training=False,relu=False)) (self.feed('res3a_relu', 'bn3b_branch2c') .add(name='res3b') .relu(name='res3b_relu') .conv(1, 1, 128,", ".deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024,", "name='res4a_branch2b') .batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False,", "relu=False, name='res4e_branch2a') .batch_normalization(relu=True, name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", "2048, 1, 1, biased=False, relu=False, name='res5a_branch2c', padding='VALID') 
.batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1',", "name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024, 2,", ".conv(3, 3, 512, 1, 1, biased=False, relu=False, name='res5a_branch2b') # .batch_normalization(relu=True,", "biased=False, relu=False, name='res2a_branch2a') .batch_normalization(relu=True, name='bn2a_branch2a',is_training=False) .conv(3, 3, 64, 1, 1,", "1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True, name='bn3a_branch2b',is_training=False) .conv(1, 1, 512,", "1, 1, biased=True, rate=2, relu=False, name='res5b_branch2b_offset', padding='SAME', initializer='zeros')) (self.feed('res5b_branch2a_relu', 'res5b_branch2b_offset')", "name='bn5b_branch2c')) (self.feed('res5a_relu', 'bn5b_branch2c') .add(name='res5b') .relu(name='res5b_relu') .conv(1, 1, 512, 1, 1,", ".conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1',", "1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64, 1,", ".relu(name='res5c_relu') # .fc(n_classes, relu=False, name='cls_score') # .softmax(name='cls_prob')) # (self.feed('res5c_relu') #", "1, 128, 1, 1, biased=False, relu=False, name='res3b_branch2a') .batch_normalization(relu=True, name='bn3b_branch2a',is_training=False) .conv(3,", "name='res5b_branch2b') # .batch_normalization(relu=True, name='bn5b_branch2b',is_training=False) # .conv(1, 1, 2048, 1, 1,", "name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72, 1, 1,", "biased=False, relu=False, name='res2b_branch2a') .batch_normalization(relu=True, 
name='bn2b_branch2a',is_training=False) .conv(3, 3, 64, 1, 1,", "1, 1, biased=False, relu=False, name='res2a_branch2b') .batch_normalization(relu=True, name='bn2a_branch2b',is_training=False) .conv(1, 1, 256,", "name='res4a_branch2a', padding='VALID') .batch_normalization(relu=True, name='bn4a_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False,", ")) # Loss of rpn_cls & rpn_boxes (self.feed('rpn_conv/3x3') .conv(1,1,len(anchor_scales)*3*4, 1,", "1, 64, 1, 1, biased=False, relu=False, name='res2c_branch2a') .batch_normalization(relu=True, name='bn2c_branch2a',is_training=False) .conv(3,", ".batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128, 1, 1,", "1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1,", "512, 1, 1, biased=False, rate=2, relu=False, num_deform_group=4, name='res5c_branch2b') .batch_normalization(relu=False, name='bn5c_branch2b')", ".relu(name='res5a_relu') # .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res5b_branch2a')", "1, 512, 2, 2, biased=False, relu=False, name='res5a_branch2a', padding='VALID') # .batch_normalization(relu=True,", "1, 1, biased=False, relu=False, name='res5b_branch2c') # .batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() #", "relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu') .conv(1, 1, 1024,", "[see LICENSE for details] # Written by miraclebiu # --------------------------------------------------------", "(self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7, output_dim=256, trans_std=1e-1,", 
"name='res5a_branch1', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch1')) (self.feed('res4f_relu') .conv(1, 1, 512, 1, 1,", "output_dim=256, trans_std=1e-1, spatial_scale=0.0625, name='deformable_roi_pool') .fc(num_out=1024, name='fc_new_1') .fc(num_out=1024, name='fc_new_2')) (self.feed('fc_new_2') .fc(num_out=n_classes,", "# (self.feed('bn5a_branch2c','bn5a_branch1') # .add(name='res5a') # .relu(name='res5a_relu') # .conv(1, 1, 512,", "3, 2, 2, padding='VALID',name='pool1') .conv(1, 1, 256, 1, 1, biased=False,", "name = 'rpn-data' )) # Loss of rpn_cls & rpn_boxes", "'dontcare_areas') .proposal_target_layer(n_classes,name = 'roi-data')) #========= RCNN ============ (self.feed('res4f_relu') .conv(1, 1,", "tf.placeholder(tf.float32, shape=[None, None, None, 3], name='data') self.im_info = tf.placeholder(tf.float32, shape=[None,", "rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1, 1, 2048,", "256, 1, 1, relu=False, name='conv_new_1') .relu(name='conv_new_1_relu')) (self.feed('conv_new_1_relu', 'roi-data') .deform_psroi_pool(group_size=1, pooled_size=7,", "name='bn4e_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4e_branch2b') .batch_normalization(relu=True,", "1, 1, biased=False, relu=False, name='res3b_branch2b') .batch_normalization(relu=True, name='bn3b_branch2b',is_training=False) .conv(1, 1, 512,", "1, padding='VALID', relu = False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas',", "1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c')", ".batch_normalization(relu=False, name='bn5b_branch2b') .relu(name='res5b_branch2b_relu') .conv(1, 1, 2048, 1, 1, biased=False, relu=False,", "name='bn3a_branch2a',is_training=False) .conv(3, 
3, 128, 1, 1, biased=False, relu=False, name='res3a_branch2b') .batch_normalization(relu=True,", "1, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False)) (self.feed('res3c_relu', 'bn3d_branch2c') .add(name='res3d') .relu(name='res3d_relu')", "# TFFRCNN - Resnet50 # Copyright (c) 2016 # Licensed", "name='bn3d_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3d_branch2c') .batch_normalization(name='bn3d_branch2c',is_training=False,relu=False))", "biased=False, relu=False, name='res5a_branch2a', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2a') .relu(name='res5a_branch2a_relu') .conv(3, 3, 72,", "relu=False, name='res3c_branch2c') .batch_normalization(name='bn3c_branch2c',is_training=False,relu=False)) (self.feed('res3b_relu', 'bn3c_branch2c') .add(name='res3c') .relu(name='res3c_relu') .conv(1, 1, 128,", "relu=False, name='res5a_branch2c', padding='VALID') .batch_normalization(relu=False, name='bn5a_branch2c')) (self.feed('bn5a_branch1', 'bn5a_branch2c') .add(name='res5a') .relu(name='res5a_relu') .conv(1,", ".batch_normalization(relu=True, name='bn4d_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4d_branch2b')", "False, name='rpn_cls_score')) (self.feed('rpn_cls_score', 'gt_boxes', 'gt_ishard', 'dontcare_areas', 'im_info') .anchor_target_layer(_feat_stride, anchor_scales, name", ".conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4b_branch2a') .batch_normalization(relu=True, name='bn4b_branch2a',is_training=False)", ".relu(name='res3c_relu') .conv(1, 1, 128, 1, 1, biased=False, relu=False, name='res3d_branch2a') .batch_normalization(relu=True,", "2, 2, biased=False, relu=False, name='res4a_branch1', padding='VALID') .batch_normalization(name='bn4a_branch1',is_training=False,relu=False)) (self.feed('res3d_relu') .conv(1, 1,", ".batch_normalization(relu=True, 
name='bn4b_branch2a',is_training=False) .conv(3, 3, 256, 1, 1, biased=False, relu=False, name='res4b_branch2b')", "1, 1, biased=False, relu=False, name='res2a_branch1') .batch_normalization(name='bn2a_branch1',is_training=False,relu=False)) (self.feed('pool1') .conv(1, 1, 64,", "biased=False, relu=False, name='res4c_branch2a') .batch_normalization(relu=True, name='bn4c_branch2a',is_training=False) .conv(3, 3, 256, 1, 1,", ".batch_normalization(relu=True, name='bn3c_branch2b',is_training=False) .conv(1, 1, 512, 1, 1, biased=False, relu=False, name='res3c_branch2c')", "relu=False) .reshape(shape=(-1,2,7,7), name='offset_reshape')) (self.feed('conv_new_1_relu', 'roi-data', 'offset_reshape') .deform_psroi_pool(group_size=1, pooled_size=7, sample_per_part=4, no_trans=False,", ".conv(3, 3, 64, 1, 1, biased=False, relu=False, name='res2b_branch2b') .batch_normalization(relu=True, name='bn2b_branch2b',is_training=False)", ".batch_normalization(relu=True, name='bn4a_branch2b',is_training=False) .conv(1, 1, 1024, 1, 1, biased=False, relu=False, name='res4a_branch2c')", "1, 1, biased=False, relu=False, name='res3d_branch2b') .batch_normalization(relu=True, name='bn3d_branch2b',is_training=False) .conv(1, 1, 512,", ".batch_normalization(name='bn5b_branch2c',is_training=False,relu=False)) # #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') #", ".add(name='res4d') .relu(name='res4d_relu') .conv(1, 1, 256, 1, 1, biased=False, relu=False, name='res4e_branch2a')", "name='res3a_branch2c') .batch_normalization(name='bn3a_branch2c',is_training=False,relu=False)) (self.feed('bn3a_branch1', 'bn3a_branch2c') .add(name='res3a') .relu(name='res3a_relu') .conv(1, 1, 128, 1,", "1, 1, biased=False, relu=False, name='res5c_branch2c') # .batch_normalization(name='bn5c_branch2c',is_training=False,relu=False)) # #pdb.set_trace() #", "# #pdb.set_trace() # (self.feed('res5a_relu', # 'bn5b_branch2c') # .add(name='res5b') # .relu(name='res5b_relu')", "1, 
biased=False, rate=2, relu=False, num_deform_group=4, name='res5a_branch2b') .batch_normalization(relu=False, name='bn5a_branch2b') .relu(name='res5a_branch2b_relu') .conv(1,", "relu=False, name='res2c_branch2c') .batch_normalization(name='bn2c_branch2c',is_training=False,relu=False)) (self.feed('res2b_relu', 'bn2c_branch2c') .add(name='res2c') .relu(name='res2c_relu') .conv(1, 1, 512,", "2, 2, padding='VALID',name='pool1') .conv(1, 1, 256, 1, 1, biased=False, relu=False," ]
[ "session, ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False,", "principal specifications. A principal specification is a principal id or", "{value}\") else: LOGGER.debug(f\"Visiting single account: {value}\") yield value return target_iterator", "context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\")", "and name (name may be None), returning True if the", "target)] else: raise TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target): target_type, target_id", "in [\"GROUP\", \"USER\"], isinstance(principal[1], str), ]) except: return False def", "(principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else:", "= sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments", "!= principal_type) if type_matches and principal[1] == principal_id: LOGGER.debug(f\"Found principal", "list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([ len(principal) == 2, isinstance(principal[0],", "TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target): target_type, target_id = target if", "return all([ len(target) == 2, isinstance(target[0], str), target[0] in [\"AWS_OU\",", "except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else: raise ValueError(f\"Unknown principal type", "principal_type) if type_matches and principal[1] == principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\")", "those in child OUs. 
Returns: An iterator over Assignment namedtuples", "value return target_iterator def _get_target_iterator(context: _Context): if context.target: iterables =", "in response[\"Accounts\"]: account_id = account[\"Id\"] account_name = account[\"Name\"] value =", "if context.get_target_names: organizations_client = context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"):", "permission set arn and name (name may be None), returning", "permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, )", "permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id,", "target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn, principal_type,", "re import numbers import collections import logging from collections.abc import", "_get_account_iterator(target, context: _Context): def target_iterator(): target_name = None if context.get_target_names:", "instance to use, or it will be looked up using", "value = (*target, target_name) if not _filter(context.filter_cache, value[1], context.target_filter, value):", "get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS SSO assignments. 
Args: session", "taking target type, target id, and target name (which may", "[_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] def permission_set_iterator(target_type, target_id, target_name):", "= Assignment( ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id,", "response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key]", "in principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): assignment =", "over AWS SSO assignments. Args: session (boto3.Session): boto3 session to", "names are being retrieved or it will be looked up", "_get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set permission_set_id =", "{(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context:", "_Context): def principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if", "[(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target): target_type,", "if principal names are being retrieved or it will be", "if ou.get(\"Name\"): # target_name = ou(\"Name\") value = (*target, target_name)", "Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\",", "accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator def _get_single_target_iterator(target, context:", "None else: raise ValueError(f\"Unknown principal type 
{principal_type}\") principal_name = context.cache[principal_key]", "= _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type,", "principal_id) if not context.get_principal_names: principal_name = None else: if principal_key", "= {} context = _Context( session = session, ids=ids, principal=principal,", "id, or a 2-tuple of target type, which is either", "if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str): if", "\"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ]) def _filter(filter_cache, key,", "should be included. permission_set: A permission set arn or id,", "match principals\") continue principal_key = (principal_type, principal_id) if not context.get_principal_names:", "target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for", "response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name", "id {permission_set}\") return [permission_set_arn] def _is_target_tuple(target): try: return all([ len(target)", "for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\")", "TypeError(f\"Invalid target type {target_type}\") return [(target_type, target_id)] else: value =", "an account or OU id, or a 2-tuple of target", "str): if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target)", "value return target_iterator def _get_ou_iterator(target, context: 
_Context): def target_iterator(): target_name", "LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) def fil(*args): print(args) return", "returning True if the principal should be included. permission_set: A", "return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id, target_name,", "may be None), returning True if the permission set should", "= permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names: permission_set_name = None else:", "target_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\")", "\"PermissionSets\" not in response: continue for permission_set_arn in response[\"PermissionSets\"]: permission_set_id", "def target_iterator(): target_name = None # if context.get_target_names: # organizations_client", "== \"USER\": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser", "and principal name (which may be None), and returning True", "context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set", "[(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str): if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\",", "instance_arn (str): The SSO instance to use, or it will", "type_matches = (principal[0] is None or principal[0] != principal_type) if", "A target specification or list of target specifications. 
A target", "try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\")", "try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\")", "import sys import json logging.basicConfig(level=logging.INFO) kwargs = {} for v", "may be None), and returning True if the target should", "permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission set id", "permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id,", "in permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id, principal_name in principal_iterator(", "set arn and name (name may be None), returning True", "AWS SSO assignments. 
Args: session (boto3.Session): boto3 session to use", "target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set", "= context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name = account[\"Name\"]", "[(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\",", "return [(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p)", "else: raise TypeError(f\"Invalid target type {target_type}\") def _get_all_accounts_iterator(context: _Context): def", "get all accounts including those in child OUs. Returns: An", "target_iterator else: LOGGER.debug(f\"Iterating for all accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set,", "or it will be looked up using ListInstances principal: A", "{response}\") if not response[\"AccountAssignments\"] and not \"NextToken\" in response: LOGGER.debug(f\"No", "to use instance_arn (str): The SSO instance to use, or", "\"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except: return False def _process_target(target): if", "target_name) for it in iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating for", "for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"]", "be included. target: A target specification or list of target", "= assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal: for principal in", "(which may be None), and returning True if the principal", "in assignments. 
get_permission_set_names (bool): Retrieve names for permission sets in", "principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, )", "_Context): def target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for", "for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for account in", "2-tuple of target type, which is either AWS_ACCOUNT or AWS_OU,", "2-tuple of principal type and id. principal_filter: A callable taking", "TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\")", "None), and returning True if the target should be included.", "= json.loads(v) def fil(*args): print(args) return True kwargs[\"target_filter\"] = fil", "principal type, principal id, and principal name (which may be", "= response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif principal_type ==", "permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if \"PermissionSets\" not", "value = _flatten(_process_target(t) for t in target) return value def", "= _flatten(_process_target(t) for t in target) return value def _get_account_iterator(target,", "value in list_assignments(session, **kwargs): print(\",\".join(v or \"\" for v in", "is filtered: {value}\") continue LOGGER.debug(f\"Visiting account: {value}\") yield value return", "iterables = 
[_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] def permission_set_iterator(target_type,", "context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id):", "f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission", "args): if not func: return True if key not in", "arn and name (name may be None), returning True if", "(*target, target_name) if not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account is", "key, func, args): if not func: return True if key", "is either AWS_ACCOUNT or AWS_OU, and target id. target_filter: A", "def target_iterator(): target_name = None if context.get_target_names: organizations_client = context.session.client(\"organizations\")", "assignment: {assignment}\") yield assignment if __name__ == \"__main__\": import boto3", "LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue", "{target_type}\") def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator", "kwargs = {} for v in sys.argv[1:]: if hasattr(logging, v):", "LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield principal_type,", "will be looked up using ListInstances principal: A principal specification", "not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): 
LOGGER.debug(f\"Single permission set is", "IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] = response[\"DisplayName\"] except", "if context.get_target_names: # organizations_client = context.session.client(\"organizations\") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"]", "not target: return None if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))]", "except: return False def _process_principal(principal): if not principal: return None", ") LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key]", "LOGGER.debug(f\"Permission set is filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set:", "def _get_single_target_iterator(target, context: _Context): target_type = target[0] if target_type ==", "or a 2-tuple of principal type and id. principal_filter: A", "{target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in", "permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name): if not", "identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False,", "(bool): Retrieve names for principals in assignments. 
get_permission_set_names (bool): Retrieve", "return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return", "specification or list of target specifications. A target specification is", "= context.session.client(\"organizations\") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): #", "target {target}\") elif _is_target_tuple(target): target_type, target_id = target if target_type", "# if ou.get(\"Name\"): # target_name = ou(\"Name\") value = (*target,", "context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else: raise", "return _flatten(_process_principal(p) for p in principal) def _process_permission_set(ids, permission_set): if", "= response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single", "principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal is filtered:", "_process_principal(principal): if not principal: return None if isinstance(principal, str): return", "_filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission set is filtered:", "context: _Context): def target_iterator(): target_name = None if context.get_target_names: organizations_client", "principal name (which may be None), and returning True if", "str): return [(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)] else: return", "permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, 
get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS", "\"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ]) def", "names for permission sets in assignments. get_target_names (bool): Retrieve names", "if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set]", "hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) def fil(*args):", "TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\")", "retrieved or it will be looked up using ListInstances principal:", "yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator def _get_single_target_iterator(target, context: _Context):", "target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment if", "sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate(", "= [_get_single_target_iterator(t, context) for t in context.target] def target_iterator(): return", ") LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn]", "def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name): if target_type !=", "to get all accounts including those in child OUs. 
Returns:", "in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target type {target_type}\") return [(target_type,", "not context.get_principal_names: principal_name = None else: if principal_key not in", "of the same. permission_set_filter: A callable taking permission set arn", "target_name): return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables]) return", "sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page:", "InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if", "target_type == \"AWS_OU\": return _get_ou_iterator(target, context) else: raise TypeError(f\"Invalid target", "permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment", "permission_set_name)): LOGGER.debug(f\"Permission set is filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission", "context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if", "target specification is an account or OU id, or a", "isinstance(principal[0], str), principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1], str), ]) except:", "_get_principal_iterator(context) for target_type, target_id, target_name in target_iterator(): for permission_set_arn, permission_set_id,", "callable taking target type, target id, and target name (which", "if not principal: return None if isinstance(principal, str): return [(None,", "is provided as a target to get all accounts including", "\"principal\", \"principal_filter\", 
\"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\",", "context) else: raise TypeError(f\"Invalid target type {target_type}\") def _get_all_accounts_iterator(context: _Context):", "filter_cache[key] = func(*args) return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def", "return target_iterator def _get_ou_iterator(target, context: _Context): def target_iterator(): target_name =", "principal specification is a principal id or a 2-tuple of", "permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission set id {permission_set}\")", "organizations_client = context.session.client(\"organizations\") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"):", "LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] =", "True if the target should be included. 
get_principal_names (bool): Retrieve", "response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else: raise ValueError(f\"Unknown principal", "response: {response}\") context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None", "if isinstance(target, str): if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif", "def target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response", "in context.cache: if principal_type == \"GROUP\": try: response = identity_store_client.describe_group(", "Returns: An iterator over Assignment namedtuples \"\"\" ids = Ids(lambda:", "target_type = target[0] if target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target, context)", "_flatten(_process_permission_set(ids, ps) for ps in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn =", "if permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn", "principal_type, principal_id, principal_name return principal_iterator Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\",", "context: _Context): target_type = target[0] if target_type == \"AWS_ACCOUNT\": return", "{(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn,", "ps in context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id,", "= target[0] if target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif", "in [\"AWS_OU\", 
\"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except: return False def", "accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\")", "ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names,", "target name (which may be None), and returning True if", "target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set =", "in response: LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment in", "returning True if the permission set should be included. target:", "LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if \"PermissionSets\" not in response: continue", "LOGGER.debug(f\"Visiting single permission set {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name", ") LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key]", "session = boto3.Session() print(\",\".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(\",\".join(v", "[_get_single_target_iterator(t, context) for t in context.target] def target_iterator(): return itertools.chain(*[it()", "set is filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission set", "_list_assignments( session, ids, principal=None, principal_filter=None, permission_set=None, 
permission_set_filter=None, target=None, target_filter=None, get_principal_names=False,", "id. principal_filter: A callable taking principal type, principal id, and", "identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target,", "filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name", "def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator =", "permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names:", "same. permission_set_filter: A callable taking permission set arn and name", "print(\",\".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(\",\".join(v or \"\" for", "if key not in filter_cache: filter_cache[key] = func(*args) return filter_cache[key]", "not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is filtered:", "permission_set_name)): LOGGER.debug(f\"Single permission set is filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting", "key not in filter_cache: filter_cache[key] = func(*args) return filter_cache[key] def", "isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str): if re.match(r\"^\\d+$\",", "\"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ])", "session, instance_arn=None, identity_store_id=None, principal=None, 
principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False,", "response: {response}\") context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None", "context.principal: for principal in context.principal: type_matches = (principal[0] is None", "or OU id, or a 2-tuple of target type, which", "if context.target: iterables = [_get_single_target_iterator(t, context) for t in context.target]", "identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] = response[\"DisplayName\"]", "to True if an OU is provided as a target", "== \"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif target_type == \"AWS_OU\": return", "accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for account in response[\"Accounts\"]: account_id =", "context.get_target_names: # organizations_client = context.session.client(\"organizations\") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] #", "import Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER = logging.getLogger(__name__)", "be None), and returning True if the target should be", "the permission set should be included. target: A target specification", "accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set", "context) for ps in context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return", "= Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal,", "the target should be included. 
get_principal_names (bool): Retrieve names for", "are being retrieved or it will be looked up using", "target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator", "LOGGER.debug(f\"Single permission set is filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single", "= context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)):", "import format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [ \"session\",", "\"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ]) def list_assignments( session,", "sso_admin_client = context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for", "permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name):", "context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\")", "permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"):", "InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if \"PermissionSets\" not in", "kwargs[\"target_filter\"] = fil try: session = 
boto3.Session() print(\",\".join(Assignment._fields)) for value", "{} filter_cache = {} context = _Context( session = session,", "return target_iterator def _get_single_target_iterator(target, context: _Context): target_type = target[0] if", "yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context): if", "(str): The SSO instance to use, or it will be", "response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if", "account: {value}\") yield value return target_iterator def _get_target_iterator(context: _Context): if", "target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid", "_get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name):", "account or OU id, or a 2-tuple of target type,", "_get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id, target_name, permission_set_arn,", "type_matches and principal[1] == principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else:", "{permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id =", "permission_set) target = _process_target(target) cache = {} filter_cache = {}", "= _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type, target_id, target_name in", "assignments. 
Args: session (boto3.Session): boto3 session to use instance_arn (str):", "context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name = account[\"Name\"] value", "(str): The identity store to use if principal names are", "target: A target specification or list of target specifications. A", "permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def", "if __name__ == \"__main__\": import boto3 import sys import json", "in context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name)", "permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate over", "cache = {} filter_cache = {} context = _Context( session", "return True if key not in filter_cache: filter_cache[key] = func(*args)", "context.principal: type_matches = (principal[0] is None or principal[0] != principal_type)", "target_iterator(): return itertools.chain(*[it() for it in iterables]) return target_iterator else:", "principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment(", "store to use if principal names are being retrieved or", "_get_single_target_iterator(target, context: _Context): target_type = target[0] if target_type == \"AWS_ACCOUNT\":", "permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, 
get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache,", "for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id}", "may be None), and returning True if the principal should", "principal_key = (principal_type, principal_id) if not context.get_principal_names: principal_name = None", "IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] = response[\"UserName\"] except", "def permission_set_iterator(target_type, target_id, target_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported", "= ou(\"Name\") value = (*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1],", "LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if not response[\"AccountAssignments\"] and", "return None if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target,", "= session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names,", "= organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for", "principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match principals\")", "raise TypeError(f\"Invalid target type {target_type}\") return [(target_type, target_id)] else: value", "(permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission set is filtered: {(permission_set_id, permission_set_name)}\") else:", "be 
looked up using ListInstances principal: A principal specification or", "provided as a target to get all accounts including those", "{response}\") context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else:", "permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments(", "filtered: {value}\") continue LOGGER.debug(f\"Visiting account: {value}\") yield value return target_iterator", "target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session, ids,", "not permission_set: return None if not isinstance(permission_set, str) and isinstance(permission_set,", "over Assignment namedtuples \"\"\" ids = Ids(lambda: session, instance_arn, identity_store_id)", "account_id, account_name) if not _filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f\"Account is", "for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for principal_type,", "if not permission_set: return None if not isinstance(permission_set, str) and", "single account: {value}\") yield value return target_iterator def _get_ou_iterator(target, context:", "including those in child OUs. Returns: An iterator over Assignment", "or list of principal specifications. 
A principal specification is a", "if not context.get_permission_set_names: permission_set_name = None else: sso_admin_client = context.session.client(\"sso-admin\")", "target[0] if target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif target_type", "context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\")", "permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type,", "get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator", "continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name", "value[1], recursive=context.ou_recursive) for account in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"]", "target_id, target_name): if not context.get_permission_set_names: permission_set_name = None else: sso_admin_client", "None # if context.get_target_names: # organizations_client = context.session.client(\"organizations\") # ou", "len(principal) == 2, isinstance(principal[0], str), principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1],", "_filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal is", "get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator =", "principal_name in principal_iterator( target_type, target_id, 
target_name, permission_set_arn, permission_set_id, permission_set_name): assignment", "OUs. Returns: An iterator over Assignment namedtuples \"\"\" ids =", "for value in list_assignments(session, **kwargs): print(\",\".join(v or \"\" for v", "Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER = logging.getLogger(__name__) _Context", "\"NextToken\" in response: LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment", "identity_store_client = context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate(", "def _filter(filter_cache, key, func, args): if not func: return True", "child OUs. Returns: An iterator over Assignment namedtuples \"\"\" ids", "of principal specifications. A principal specification is a principal id", "{value}\") continue LOGGER.debug(f\"Visiting account: {value}\") yield value return target_iterator def", "return None if isinstance(principal, str): return [(None, principal)] if _is_principal_tuple(principal):", "None or principal[0] != principal_type) if type_matches and principal[1] ==", "permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise", "{} for v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v))", "for account in response[\"Accounts\"]: account_id = account[\"Id\"] account_name = account[\"Name\"]", "for permission sets in assignments. 
get_target_names (bool): Retrieve names for", "callable taking permission set arn and name (name may be", "instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False,", "principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\")", "in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs =", "for t in target) return value def _get_account_iterator(target, context: _Context):", "type, which is either AWS_ACCOUNT or AWS_OU, and target id.", "_get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps", "account_name) if not _filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f\"Account is filtered:", "= sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id}", "_process_target(target): if not target: return None if isinstance(target, numbers.Number): return", "principals in assignments. 
get_permission_set_names (bool): Retrieve names for permission sets", "set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def", "a principal id or a 2-tuple of principal type and", "if principal_type == \"GROUP\": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id", "yield value return target_iterator def _get_target_iterator(context: _Context): if context.target: iterables", "not \"NextToken\" in response: LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for", "continue principal_key = (principal_type, principal_id) if not context.get_principal_names: principal_name =", "LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\",", "for p in principal) def _process_permission_set(ids, permission_set): if not permission_set:", "for principal_type, principal_id, principal_name in principal_iterator( target_type, target_id, target_name, permission_set_arn,", "not func: return True if key not in filter_cache: filter_cache[key]", "sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id,", "context.cache: if principal_type == \"GROUP\": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id,", "type {target_type}\") return [(target_type, target_id)] else: value = _flatten(_process_target(t) for", "permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn, principal_type, principal_id, principal_name,", "should be included. target: A target specification or list of", "else: value = _flatten(_process_target(t) for t in target) return value", "assignments. 
get_permission_set_names (bool): Retrieve names for permission sets in assignments.", "permission sets in assignments. get_target_names (bool): Retrieve names for targets", "be included. permission_set: A permission set arn or id, or", "target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for", "session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names,", "get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session, ids, principal=None, principal_filter=None,", "for assignment in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"]", "ListInstances principal: A principal specification or list of principal specifications.", "not in context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet", "= sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name =", "principal[1] == principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id}", "permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\"", "included. get_principal_names (bool): Retrieve names for principals in assignments. 
get_permission_set_names", "{target}\") elif _is_target_tuple(target): target_type, target_id = target if target_type not", "permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission set {(permission_set_id, permission_set_name)}\") yield permission_set_arn,", "principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False):", "principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting assignment:", "in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator def _get_single_target_iterator(target,", "target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client =", "should be included. get_principal_names (bool): Retrieve names for principals in", "is filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\")", "(*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in", "and returning True if the principal should be included. permission_set:", "= _Context( session = session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter,", "\"GROUP\": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response:", "id, and target name (which may be None), and returning", "if context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered:", "get_permission_set_names (bool): Retrieve names for permission sets in assignments. 
get_target_names", "if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is", "type {principal_type}\") principal_name = context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter,", "permission set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator", "in iterables]) return target_iterator else: LOGGER.debug(f\"Iterating for all accounts\") return", "recursive=context.ou_recursive) for account in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return", "permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context):", "set is filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id,", "return [(target_type, target_id)] else: value = _flatten(_process_target(t) for t in", "type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response", "permission_set_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\")", "InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name", "filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal:", "filtered: {value}\") else: LOGGER.debug(f\"Visiting single account: {value}\") yield value return", "value[1], context.target_filter, 
value): LOGGER.debug(f\"Account is filtered: {value}\") else: LOGGER.debug(f\"Visiting single", "_get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\")", "in context.target] def target_iterator(): return itertools.chain(*[it() for it in iterables])", "target should be included. get_principal_names (bool): Retrieve names for principals", "(principal_type, principal_id) if not context.get_principal_names: principal_name = None else: if", "context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif principal_type", "logging from collections.abc import Iterable import itertools import aws_error_utils from", "_get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name): if target_type != \"AWS_ACCOUNT\":", "in assignments. get_target_names (bool): Retrieve names for targets in assignments.", "it will be looked up using ListInstances principal: A principal", "t in target) return value def _get_account_iterator(target, context: _Context): def", ") LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment if __name__ == \"__main__\":", "get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session, ids, principal=None,", "principal_type, principal_id, principal_name in principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id,", "None if isinstance(principal, str): return [(None, principal)] if _is_principal_tuple(principal): return", "list_assignments(session, **kwargs): print(\",\".join(v or \"\" for v in value)) except", "sets in assignments. 
get_target_names (bool): Retrieve names for targets in", "_Context): if context.target: iterables = [_get_single_target_iterator(t, context) for t in", "Iterable): return _flatten(_process_permission_set(ids, ps) for ps in permission_set) if permission_set.startswith(\"arn\"):", "\"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ]) def", "{response}\") if \"PermissionSets\" not in response: continue for permission_set_arn in", "context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is filtered: {(permission_set_id, permission_set_name)}\") continue", "[(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p) for", "for all permission sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def", "= None elif principal_type == \"USER\": try: response = identity_store_client.describe_user(", "_is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p) for p in principal)", "principal: {principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name return principal_iterator Assignment =", "fil(*args): print(args) return True kwargs[\"target_filter\"] = fil try: session =", "return [tuple(principal)] else: return _flatten(_process_principal(p) for p in principal) def", "context: _Context): permission_set_arn = permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type,", "isinstance(principal, str): return [(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)] else:", "principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type !=", "context.session.client(\"organizations\") # ou = 
organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): # target_name", "json logging.basicConfig(level=logging.INFO) kwargs = {} for v in sys.argv[1:]: if", "target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS SSO", "identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] = response[\"UserName\"]", "it in iterables]) return target_iterator else: LOGGER.debug(f\"Iterating for all accounts\")", "_Context): def target_iterator(): target_name = None if context.get_target_names: organizations_client =", "= context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn,", "yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def", "permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission set is filtered: {(permission_set_id,", "value): LOGGER.debug(f\"Account is filtered: {value}\") continue LOGGER.debug(f\"Visiting account: {value}\") yield", "get_target_names (bool): Retrieve names for targets in assignments. 
ou_recursive (bool):", "if target_type not in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target type", "None else: sso_admin_client = context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn", "def _is_target_tuple(target): try: return all([ len(target) == 2, isinstance(target[0], str),", "target_name): for principal_type, principal_id, principal_name in principal_iterator( target_type, target_id, target_name,", "str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for ps in", "{principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\")", "principal type {principal_type}\") principal_name = context.cache[principal_key] if not _filter(context.filter_cache, principal_key,", "for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if not response[\"AccountAssignments\"] and not", "_flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([ len(principal) ==", "value = (*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for", "either AWS_ACCOUNT or AWS_OU, and target id. target_filter: A callable", "sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v)", "assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal: for principal in context.principal:", "target_iterator(): target_name = None if context.get_target_names: organizations_client = context.session.client(\"organizations\") account", "import Iterable import itertools import aws_error_utils from .lookup import Ids,", "True if the permission set should be included. 
target: A", "principal_type == \"GROUP\": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id )", "it will be looked up using ListInstances identity_store_id (str): The", "or a 2-tuple of target type, which is either AWS_ACCOUNT", "= _get_principal_iterator(context) for target_type, target_id, target_name in target_iterator(): for permission_set_arn,", "permission_set_arn, permission_set_id, permission_set_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target", "target_type, target_id = target if target_type not in [\"AWS_ACCOUNT\", \"AWS_OU\"]:", "str), ]) except: return False def _process_principal(principal): if not principal:", "taking principal type, principal id, and principal name (which may", "LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]: principal_type", "principals\") continue principal_key = (principal_type, principal_id) if not context.get_principal_names: principal_name", "permission_set_arn not in context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn )", "principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive,", "set arn or id, or a list of the same.", "= [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] def permission_set_iterator(target_type, target_id,", "context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn,", "= None # if 
context.get_target_names: # organizations_client = context.session.client(\"organizations\") #", "else: raise TypeError(f\"Invalid permission set id {permission_set}\") return [permission_set_arn] def", "PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if not", "a 2-tuple of principal type and id. principal_filter: A callable", "set {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def", "principal_name return principal_iterator Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\",", "principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal", "not context.get_permission_set_names: permission_set_name = None else: sso_admin_client = context.session.client(\"sso-admin\") response", "def _get_account_iterator(target, context: _Context): def target_iterator(): target_name = None if", "isinstance(target[0], str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except:", "context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps in context.permission_set] def", "if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client", "{target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\")", "permission_set) if permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"):", "if account.get(\"Name\"): target_name = account[\"Name\"] value = (*target, 
target_name) if", "response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn]", "True if key not in filter_cache: filter_cache[key] = func(*args) return", "= _process_target(target) cache = {} filter_cache = {} context =", "ps in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\")", "if not context.get_principal_names: principal_name = None else: if principal_key not", "in principal) def _process_permission_set(ids, permission_set): if not permission_set: return None", "using ListInstances principal: A principal specification or list of principal", "import json logging.basicConfig(level=logging.INFO) kwargs = {} for v in sys.argv[1:]:", "not match principals\") continue principal_key = (principal_type, principal_id) if not", "filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator =", "LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] =", "get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context)", "boto3 session to use instance_arn (str): The SSO instance to", "id, and principal name (which may be None), and returning", "\"filter_cache\" ]) def _filter(filter_cache, key, func, args): if not func:", "permission_set_arn = permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name):", "== 2, 
isinstance(principal[0], str), principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1], str),", "= None else: if principal_key not in context.cache: if principal_type", "and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for ps in permission_set)", "page: {response}\") if \"PermissionSets\" not in response: continue for permission_set_arn", "\"target_name\", ]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None,", "[ \"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\",", "else: LOGGER.debug(f\"Visiting single permission set {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id,", "_get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type, target_id,", "principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting", "else: raise TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target): target_type, target_id =", "= boto3.Session() print(\",\".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(\",\".join(v or", "collections import logging from collections.abc import Iterable import itertools import", "principal_key not in context.cache: if principal_type == \"GROUP\": try: response", "which is either AWS_ACCOUNT or AWS_OU, and target id. 
target_filter:", "re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid target {target}\")", "principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1], str), ]) except: return False", "in response: continue for permission_set_arn in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\",", "# if context.get_target_names: # organizations_client = context.session.client(\"organizations\") # ou =", "target_iterator def _get_single_target_iterator(target, context: _Context): target_type = target[0] if target_type", "ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names,", "def _get_ou_iterator(target, context: _Context): def target_iterator(): target_name = None #", "response: continue for permission_set_arn in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1]", "None if context.get_target_names: organizations_client = context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if", "iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating for all permission sets\") return", "else: if principal_key not in context.cache: if principal_type == \"GROUP\":", "= (principal_type, principal_id) if not context.get_principal_names: principal_name = None else:", "target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif target_type == \"AWS_OU\":", "sys import json logging.basicConfig(level=logging.INFO) kwargs = {} for v in", "principal_filter: A callable taking principal type, principal id, and principal", "permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn, 
principal_type, principal_id, principal_name, permission_set_arn,", "target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for", "permission_set): if not permission_set: return None if not isinstance(permission_set, str)", "organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name = account[\"Name\"] value = (*target, target_name)", "value): LOGGER.debug(f\"Account is filtered: {value}\") else: LOGGER.debug(f\"Visiting single account: {value}\")", "for ps in context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type,", "target_name = None # if context.get_target_names: # organizations_client = context.session.client(\"organizations\")", "str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except: return", "permission_set_arn in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names:", "target): return [(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid target {target}\") elif", "target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator", "target id. target_filter: A callable taking target type, target id,", "iterables = [_get_single_target_iterator(t, context) for t in context.target] def target_iterator():", "= fil try: session = boto3.Session() print(\",\".join(Assignment._fields)) for value in", "principal type and id. 
principal_filter: A callable taking principal type,", "if isinstance(principal, str): return [(None, principal)] if _is_principal_tuple(principal): return [tuple(principal)]", "lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"],", "identity store to use if principal names are being retrieved", "kwargs = json.loads(v) def fil(*args): print(args) return True kwargs[\"target_filter\"] =", "all permission sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator(", "if principal_key not in context.cache: if principal_type == \"GROUP\": try:", "len(target) == 2, isinstance(target[0], str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1],", "return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables]) return permission_set_iterator", "# organizations_client = context.session.client(\"organizations\") # ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if", "account_name = account[\"Name\"] value = (\"AWS_ACCOUNT\", account_id, account_name) if not", "context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") continue LOGGER.debug(f\"Visiting account: {value}\")", "return all([ len(principal) == 2, isinstance(principal[0], str), principal[0] in [\"GROUP\",", "for principals in assignments. 
get_permission_set_names (bool): Retrieve names for permission", "principal names are being retrieved or it will be looked", "filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission set {(permission_set_id, permission_set_name)}\")", "in context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response:", "target_id, target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment if __name__", "response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\")", "{principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name return principal_iterator Assignment = collections.namedtuple(\"Assignment\",", "principal_id, principal_name in principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name):", "context.permission_set] def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name) for", "(bool): Retrieve names for targets in assignments. ou_recursive (bool): Set", "if not target: return None if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\",", "get_principal_names (bool): Retrieve names for principals in assignments. 
get_permission_set_names (bool):", "logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\",", "ou_recursive=ou_recursive, ) def _list_assignments( session, ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None,", "list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None,", "elif target_type == \"AWS_OU\": return _get_ou_iterator(target, context) else: raise TypeError(f\"Invalid", "\"cache\", \"filter_cache\" ]) def _filter(filter_cache, key, func, args): if not", "response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]}", "permission set is filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission", "in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for account in response[\"Accounts\"]: account_id", "\"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ]) def list_assignments( session, instance_arn=None,", "all([ len(target) == 2, isinstance(target[0], str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"],", "\"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ]) def _filter(filter_cache,", "= context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission", "elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn = 
f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn", "[permission_set_arn] def _is_target_tuple(target): try: return all([ len(target) == 2, isinstance(target[0],", "_Context = collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\",", "target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name) for it in iterables])", "PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if not response[\"AccountAssignments\"]", "or list of target specifications. A target specification is an", "target_name = ou(\"Name\") value = (*target, target_name) accounts = lookup_accounts_for_ou(context.session,", "the principal should be included. permission_set: A permission set arn", "PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name =", "= target if target_type not in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid", "print(args) return True kwargs[\"target_filter\"] = fil try: session = boto3.Session()", "sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for", "= lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in accounts: yield \"AWS_ACCOUNT\",", "permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn =", "if \"PermissionSets\" not in response: continue for permission_set_arn in 
response[\"PermissionSets\"]:", "be looked up using ListInstances identity_store_id (str): The identity store", "UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] = response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"):", "filter_cache = {} context = _Context( session = session, ids=ids,", "\"target_id\", \"target_name\", ]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None,", "principal) def _process_permission_set(ids, permission_set): if not permission_set: return None if", "{principal_type}:{principal_id} does not match principals\") continue principal_key = (principal_type, principal_id)", "= f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission set id {permission_set}\") return", "continue for permission_set_arn in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if", "ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): # target_name = ou(\"Name\")", "assignment if __name__ == \"__main__\": import boto3 import sys import", "target type, target id, and target name (which may be", "= context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts", "A principal specification is a principal id or a 2-tuple", "\"USER\": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response:", "ListInstances identity_store_id (str): The identity store to use if principal", "OU is provided as a target to get all accounts", "Assignment namedtuples \"\"\" ids = Ids(lambda: session, instance_arn, identity_store_id) return", 
"or principal[0] != principal_type) if type_matches and principal[1] == principal_id:", "isinstance(target[1], str), ]) except: return False def _process_target(target): if not", "if an OU is provided as a target to get", "get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set = _process_permission_set(ids,", "LOGGER.debug(f\"Account is filtered: {value}\") else: LOGGER.debug(f\"Visiting single account: {value}\") yield", "get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set = _process_permission_set(ids, permission_set)", "to use, or it will be looked up using ListInstances", "cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator", "v)) else: kwargs = json.loads(v) def fil(*args): print(args) return True", "= response[\"UserName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else: raise ValueError(f\"Unknown", "def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None,", "format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [ \"session\", \"ids\",", "permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables", "for account in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator", "LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match", "account[\"Id\"] account_name = account[\"Name\"] value = 
(\"AWS_ACCOUNT\", account_id, account_name) if", "for all accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn", "account in accounts: yield \"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator def", "LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal: for principal in context.principal: type_matches", "principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate", "assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]: principal_type =", "{response}\") context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif", "boto3.Session() print(\",\".join(Assignment._fields)) for value in list_assignments(session, **kwargs): print(\",\".join(v or \"\"", "session (boto3.Session): boto3 session to use instance_arn (str): The SSO", "value def _get_account_iterator(target, context: _Context): def target_iterator(): target_name = None", "list of target specifications. 
A target specification is an account", "sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"]", "\"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ]) def _filter(filter_cache, key, func,", "[(target_type, target_id)] else: value = _flatten(_process_target(t) for t in target)", "_Context): permission_set_arn = permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id,", "be included. get_principal_names (bool): Retrieve names for principals in assignments.", "LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment if __name__ == \"__main__\": import", "using ListInstances identity_store_id (str): The identity store to use if", "specification is a principal id or a 2-tuple of principal", "list of principal specifications. A principal specification is a principal", "assignments. 
ou_recursive (bool): Set to True if an OU is", "_process_permission_set(ids, permission_set): if not permission_set: return None if not isinstance(permission_set,", "def _process_principal(principal): if not principal: return None if isinstance(principal, str):", "_flatten(_process_target(t) for t in target) return value def _get_account_iterator(target, context:", "_get_account_iterator(target, context) elif target_type == \"AWS_OU\": return _get_ou_iterator(target, context) else:", "False def _process_target(target): if not target: return None if isinstance(target,", "yield assignment if __name__ == \"__main__\": import boto3 import sys", "logging.basicConfig(level=logging.INFO) kwargs = {} for v in sys.argv[1:]: if hasattr(logging,", "permission_set_name = response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):", "Retrieve names for principals in assignments. get_permission_set_names (bool): Retrieve names", "return None if not isinstance(permission_set, str) and isinstance(permission_set, Iterable): return", "account: {value}\") yield value return target_iterator def _get_ou_iterator(target, context: _Context):", "itertools import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from .format", "LOGGER.debug(f\"Visiting account: {value}\") yield value return target_iterator def _get_target_iterator(context: _Context):", "2)[-1] if not context.get_permission_set_names: permission_set_name = None else: if permission_set_arn", "if not context.get_permission_set_names: permission_set_name = None else: if permission_set_arn not", "target id, and target name (which may be None), and", "be None), and returning True if the principal should be", "[\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target type {target_type}\") return [(target_type, target_id)]", "not in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target type 
{target_type}\") return", "target_iterator(): organizations_client = context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response in", "is filtered: {(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission set {(permission_set_id,", "of principal type and id. principal_filter: A callable taking principal", "(bool): Set to True if an OU is provided as", "(\"AWS_ACCOUNT\", account_id, account_name) if not _filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f\"Account", "!= \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\")", "value = (\"AWS_ACCOUNT\", account_id, account_name) if not _filter(context.filter_cache, account_id, context.target_filter,", "else: LOGGER.debug(\"Iterating for all permission sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context:", "permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names: permission_set_name = None", "return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str): if re.match(r\"^\\d+$\", target): return", "if _is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p) for p in", "= account[\"Name\"] value = (\"AWS_ACCOUNT\", account_id, account_name) if not _filter(context.filter_cache,", "for target_type, target_id, target_name in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name,", "{permission_set_arn.split('/')[-1]} page: {response}\") if not response[\"AccountAssignments\"] and not \"NextToken\" in", "GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] = response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"):", "namedtuples \"\"\" ids = 
Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments(", "The SSO instance to use, or it will be looked", "True if the principal should be included. permission_set: A permission", "the same. permission_set_filter: A callable taking permission set arn and", "LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match principals\") continue principal_key = (principal_type,", "\"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif target_type == \"AWS_OU\": return _get_ou_iterator(target,", "and returning True if the target should be included. get_principal_names", "LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name return principal_iterator Assignment", "{target_type}\") return [(target_type, target_id)] else: value = _flatten(_process_target(t) for t", "func: return True if key not in filter_cache: filter_cache[key] =", "re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\",", "sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"]", "ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False,", "fil try: session = boto3.Session() print(\",\".join(Assignment._fields)) for value in list_assignments(session,", "= context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn,", "return [permission_set_arn] def 
_is_target_tuple(target): try: return all([ len(target) == 2,", "ps) for ps in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn = permission_set", "target) return value def _get_account_iterator(target, context: _Context): def target_iterator(): target_name", "(bool): Retrieve names for permission sets in assignments. get_target_names (bool):", "principal_iterator = _get_principal_iterator(context) for target_type, target_id, target_name in target_iterator(): for", "break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match principals\") continue principal_key", "Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter,", "get_target_names=False, ou_recursive=False): principal = _process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target", "aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif principal_type == \"USER\": try: response", "principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal: for principal", "set should be included. 
target: A target specification or list", "**kwargs): print(\",\".join(v or \"\" for v in value)) except KeyboardInterrupt:", "= func(*args) return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal):", "= account[\"Id\"] account_name = account[\"Name\"] value = (\"AWS_ACCOUNT\", account_id, account_name)", "permission_set_iterator(target_type, target_id, target_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target", "(which may be None), and returning True if the target", "principal[0] != principal_type) if type_matches and principal[1] == principal_id: LOGGER.debug(f\"Found", "does not match principals\") continue principal_key = (principal_type, principal_id) if", "def _is_principal_tuple(principal): try: return all([ len(principal) == 2, isinstance(principal[0], str),", "def permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name) for it", "permission set arn or id, or a list of the", "def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set permission_set_id = permission_set_arn.split(\"/\")[-1]", "= {} filter_cache = {} context = _Context( session =", "and target name (which may be None), and returning True", "_Context( session = session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target,", "permission_set = _process_permission_set(ids, permission_set) target = _process_target(target) cache = {}", "def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for", "[ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\",", "type 
{target_type}\") def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client = context.session.client(\"organizations\")", "principal_id, principal_name return principal_iterator Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\",", "use if principal names are being retrieved or it will", "= sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] =", "raise ValueError(f\"Unknown principal type {principal_type}\") principal_name = context.cache[principal_key] if not", "response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn,", "permission_set_filter: A callable taking permission set arn and name (name", "principal_name = context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id,", "will be looked up using ListInstances identity_store_id (str): The identity", "target type {target_type}\") def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client =", "name (which may be None), and returning True if the", "ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name, )", ") def _list_assignments( session, ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None,", "= organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name = account[\"Name\"] value = (*target,", "True kwargs[\"target_filter\"] = fil try: session = boto3.Session() print(\",\".join(Assignment._fields)) for", "def _list_assignments( session, 
ids, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None,", "TypeError(f\"Invalid target type {target_type}\") def _get_all_accounts_iterator(context: _Context): def target_iterator(): organizations_client", "id, or a list of the same. permission_set_filter: A callable", "]) except: return False def _process_principal(principal): if not principal: return", "response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key]", "ValueError(f\"Unknown principal type {principal_type}\") principal_name = context.cache[principal_key] if not _filter(context.filter_cache,", "page: {response}\") for account in response[\"Accounts\"]: account_id = account[\"Id\"] account_name", "is a principal id or a 2-tuple of principal type", "raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client =", "True if an OU is provided as a target to", "principal_type = assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if", "A target specification is an account or OU id, or", "\"AWS_ACCOUNT\", account[\"Id\"], account[\"Name\"] return target_iterator def _get_single_target_iterator(target, context: _Context): target_type", "def fil(*args): print(args) return True kwargs[\"target_filter\"] = fil try: session", "return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set permission_set_id", "get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session, ids, principal=None, principal_filter=None, permission_set=None,", "else: raise ValueError(f\"Unknown principal type {principal_type}\") 
principal_name = context.cache[principal_key] if", "def principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type", "\"target_type\", \"target_id\", \"target_name\", ]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None,", "def target_iterator(): return itertools.chain(*[it() for it in iterables]) return target_iterator", "permission_set_arn, permission_set_name, target_type, target_id, target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\") yield", "in context.principal: type_matches = (principal[0] is None or principal[0] !=", "\"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ])", "returning True if the target should be included. get_principal_names (bool):", "= organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): # target_name = ou(\"Name\") value", "A callable taking target type, target id, and target name", "permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id, principal_name in", "ou_recursive=False): \"\"\"Iterate over AWS SSO assignments. Args: session (boto3.Session): boto3", "if type_matches and principal[1] == principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break", "context.target: iterables = [_get_single_target_iterator(t, context) for t in context.target] def", "specifications. A target specification is an account or OU id,", "target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS SSO assignments.", "assignments. get_target_names (bool): Retrieve names for targets in assignments. 
ou_recursive", "target_name, ) LOGGER.debug(f\"Visiting assignment: {assignment}\") yield assignment if __name__ ==", "if not response[\"AccountAssignments\"] and not \"NextToken\" in response: LOGGER.debug(f\"No assignments", "== \"__main__\": import boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs", "target_type not in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target type {target_type}\")", "= assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal:", "AWS_ACCOUNT or AWS_OU, and target id. target_filter: A callable taking", "return [(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target):", "if not func: return True if key not in filter_cache:", "callable taking principal type, principal id, and principal name (which", "or permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\"", "elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission set", "as a target to get all accounts including those in", "Iterable import itertools import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou", "id or a 2-tuple of principal type and id. 
principal_filter:", "= None else: sso_admin_client = context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn,", "permission_set: return None if not isinstance(permission_set, str) and isinstance(permission_set, Iterable):", "account in response[\"Accounts\"]: account_id = account[\"Id\"] account_name = account[\"Name\"] value", "\"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client", "principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not", "specification is an account or OU id, or a 2-tuple", "None if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str):", "in child OUs. Returns: An iterator over Assignment namedtuples \"\"\"", "_filter(filter_cache, key, func, args): if not func: return True if", "import numbers import collections import logging from collections.abc import Iterable", "SSO assignments. 
Args: session (boto3.Session): boto3 session to use instance_arn", "AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if \"PermissionSets\" not in response:", "session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names,", ") LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache,", "= (principal[0] is None or principal[0] != principal_type) if type_matches", "permission_set_name = None else: if permission_set_arn not in context.cache: response", "principal)] if _is_principal_tuple(principal): return [tuple(principal)] else: return _flatten(_process_principal(p) for p", "permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id, principal_name", "{principal_type}:{principal_id}\") if context.principal: for principal in context.principal: type_matches = (principal[0]", "try: return all([ len(target) == 2, isinstance(target[0], str), target[0] in", "LOGGER.debug(f\"Iterating for all accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context):", "target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except: return False", "]) def list_assignments( session, instance_arn=None, identity_store_id=None, principal=None, principal_filter=None, permission_set=None, permission_set_filter=None,", "if target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target, context) elif target_type ==", "target = _process_target(target) cache = {} filter_cache = {} context", "permission_set=None, permission_set_filter=None, 
target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal =", "target_id)] else: value = _flatten(_process_target(t) for t in target) return", "LOGGER.debug(f\"Visiting single account: {value}\") yield value return target_iterator def _get_ou_iterator(target,", "context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response in", "for it in iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating for all", "_get_target_iterator(context: _Context): if context.target: iterables = [_get_single_target_iterator(t, context) for t", "raise TypeError(f\"Invalid target type {target_type}\") def _get_all_accounts_iterator(context: _Context): def target_iterator():", "if the permission set should be included. target: A target", "isinstance(target, str): if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\",", "import collections import logging from collections.abc import Iterable import itertools", "return value def _get_account_iterator(target, context: _Context): def target_iterator(): target_name =", "t in context.target] def target_iterator(): return itertools.chain(*[it() for it in", "None else: if permission_set_arn not in context.cache: response = sso_admin_client.describe_permission_set(", "context.get_permission_set_names: permission_set_name = None else: sso_admin_client = context.session.client(\"sso-admin\") response =", "target_iterator def _get_target_iterator(context: _Context): if context.target: iterables = [_get_single_target_iterator(t, context)", "{target_id} {permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id", "not _filter(context.filter_cache, 
account_id, context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") continue", "session, instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set,", "not isinstance(permission_set, str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for", "target_id, target_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type", "{(permission_set_id, permission_set_name)}\") else: LOGGER.debug(f\"Visiting single permission set {(permission_set_id, permission_set_name)}\") yield", "import boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs = {}", "is None or principal[0] != principal_type) if type_matches and principal[1]", "from .format import format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\",", "permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else:", "except: return False def _process_target(target): if not target: return None", "elif principal_type == \"USER\": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id", "target_id, target_name in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type,", "target_name = account[\"Name\"] value = (*target, target_name) if not _filter(context.filter_cache,", "or a list of the same. permission_set_filter: A callable taking", "== \"GROUP\": try: response = identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup", "or id, or a list of the same. 
permission_set_filter: A", "for v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else:", "collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\",", "if permission_set_arn not in context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn", "from .lookup import Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER", "filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([", "= None else: if permission_set_arn not in context.cache: response =", "it in iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating for all permission", "{(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_all_permission_sets_iterator(context:", "ou.get(\"Name\"): # target_name = ou(\"Name\") value = (*target, target_name) accounts", "response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if not", "LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return", "\"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ]) def list_assignments(", "accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in accounts: yield", "and not \"NextToken\" in response: LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\")", "{principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: 
{principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name return", "permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn =", "AWS_OU, and target id. target_filter: A callable taking target type,", "{value}\") yield value return target_iterator def _get_ou_iterator(target, context: _Context): def", "target_name, permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn, principal_type, principal_id,", "return False def _process_principal(principal): if not principal: return None if", "session = session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter,", "type and id. principal_filter: A callable taking principal type, principal", "if not isinstance(permission_set, str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps)", "context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission set is filtered: {(permission_set_id, permission_set_name)}\")", "is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting", "organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for account", "_process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target = _process_target(target) cache =", "= permission_set elif permission_set.startswith(\"ssoins-\") or permission_set.startswith(\"ins-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{permission_set}\" elif", "get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS SSO 
assignments. Args: session (boto3.Session):", "in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name):", "Assignment( ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type, target_id, target_name,", "context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page:", "[\"GROUP\", \"USER\"], isinstance(principal[1], str), ]) except: return False def _process_principal(principal):", "= identity_store_client.describe_group( IdentityStoreId=context.ids.identity_store_id, GroupId=principal_id ) LOGGER.debug(f\"DescribeGroup response: {response}\") context.cache[principal_key] =", "\"__main__\": import boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs =", "# target_name = ou(\"Name\") value = (*target, target_name) accounts =", "Args: session (boto3.Session): boto3 session to use instance_arn (str): The", "target_type, target_id, target_name in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in", "target if target_type not in [\"AWS_ACCOUNT\", \"AWS_OU\"]: raise TypeError(f\"Invalid target", "None elif principal_type == \"USER\": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id,", "response in accounts_paginator.paginate(): LOGGER.debug(f\"ListAccounts page: {response}\") for account in response[\"Accounts\"]:", "account.get(\"Name\"): target_name = account[\"Name\"] value = (*target, target_name) if not", "permission_set: A permission set arn or id, or a list", "OU id, or a 2-tuple of target type, which is", "in filter_cache: filter_cache[key] = func(*args) return filter_cache[key] def _flatten(list_of_lists): return", "target_id = target if target_type not in 
[\"AWS_ACCOUNT\", \"AWS_OU\"]: raise", "str), principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1], str), ]) except: return", "target_name) if not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account is filtered:", "= {} for v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging,", "str), ]) except: return False def _process_target(target): if not target:", "sso_admin_client = context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet", "= (\"AWS_ACCOUNT\", account_id, account_name) if not _filter(context.filter_cache, account_id, context.target_filter, value):", "2, isinstance(principal[0], str), principal[0] in [\"GROUP\", \"USER\"], isinstance(principal[1], str), ])", "numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if isinstance(target, str): if re.match(r\"^\\d+$\", target):", "permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context)", "= collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\",", "aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from .format import format_account_id", "else: return _flatten(_process_principal(p) for p in principal) def _process_permission_set(ids, permission_set):", "None), and returning True if the principal should be included.", "target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, ) def _list_assignments( session,", "elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or 
re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)] else:", "target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported", "principal_iterator Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\",", "yield value return target_iterator def _get_ou_iterator(target, context: _Context): def target_iterator():", "target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target):", "return True kwargs[\"target_filter\"] = fil try: session = boto3.Session() print(\",\".join(Assignment._fields))", "_Context): def permission_set_iterator(target_type, target_id, target_name): if target_type != \"AWS_ACCOUNT\": raise", "all accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn =", "context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id,", "import itertools import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from", "permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names: permission_set_name = None else:", "= identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id ) LOGGER.debug(f\"DescribeUser response: {response}\") context.cache[principal_key] =", "= _process_permission_set(ids, permission_set) target = _process_target(target) cache = {} filter_cache", "= _process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target = _process_target(target) 
cache", "assignment in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting", "{value}\") yield value return target_iterator def _get_target_iterator(context: _Context): if context.target:", "or it will be looked up using ListInstances identity_store_id (str):", "return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([ len(principal) == 2,", "import aws_error_utils from .lookup import Ids, lookup_accounts_for_ou from .format import", "= response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter,", "p in principal) def _process_permission_set(ids, permission_set): if not permission_set: return", "_is_target_tuple(target): target_type, target_id = target if target_type not in [\"AWS_ACCOUNT\",", "_is_principal_tuple(principal): try: return all([ len(principal) == 2, isinstance(principal[0], str), principal[0]", "single permission set {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return", "target specifications. 
A target specification is an account or OU", "numbers import collections import logging from collections.abc import Iterable import", "for it in iterables]) return target_iterator else: LOGGER.debug(f\"Iterating for all", "target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type != \"AWS_ACCOUNT\":", "{target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if not response[\"AccountAssignments\"] and not \"NextToken\"", "= f\"arn:aws:sso:::permissionSet/{permission_set}\" elif permission_set.startswith(\"ps-\"): permission_set_arn = f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid", "A callable taking permission set arn and name (name may", "target specification or list of target specifications. A target specification", "from collections.abc import Iterable import itertools import aws_error_utils from .lookup", "aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None else: raise ValueError(f\"Unknown principal type {principal_type}\")", "isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for ps in permission_set) if", "def _process_target(target): if not target: return None if isinstance(target, numbers.Number):", "context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\")", "return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return", "response: LOGGER.debug(f\"No assignments for {target_id} {permission_set_arn.split('/')[-1]}\") for assignment in response[\"AccountAssignments\"]:", "\"permission_set_name\", \"target_type\", \"target_id\", \"target_name\", ]) def list_assignments( session, instance_arn=None, identity_store_id=None,", "permission_set_iterator def 
_get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name): if target_type", "permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal = _process_principal(principal)", "in iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating for all permission sets\")", "else: sso_admin_client = context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn )", "names for principals in assignments. get_permission_set_names (bool): Retrieve names for", "boto3 import sys import json logging.basicConfig(level=logging.INFO) kwargs = {} for", "\"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\",", "an OU is provided as a target to get all", "def _process_permission_set(ids, permission_set): if not permission_set: return None if not", "if the principal should be included. 
permission_set: A permission set", "= context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response:", "iterator over Assignment namedtuples \"\"\" ids = Ids(lambda: session, instance_arn,", "__name__ == \"__main__\": import boto3 import sys import json logging.basicConfig(level=logging.INFO)", "assignment = Assignment( ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name, target_type,", "target: return None if isinstance(target, numbers.Number): return [(\"AWS_ACCOUNT\", format_account_id(target))] if", "principal = _process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target = _process_target(target)", "\"AWS_OU\"]: raise TypeError(f\"Invalid target type {target_type}\") return [(target_type, target_id)] else:", "= permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names: permission_set_name", "except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif principal_type == \"USER\": try:", "account[\"Name\"] value = (*target, target_name) if not _filter(context.filter_cache, value[1], context.target_filter,", "_list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names,", "raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator =", "import logging from collections.abc import Iterable import itertools import aws_error_utils", "not in context.cache: if principal_type == \"GROUP\": try: response =", "specifications. 
A principal specification is a principal id or a", "_get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type, target_id, target_name in target_iterator():", "return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter,", "(name may be None), returning True if the permission set", "for targets in assignments. ou_recursive (bool): Set to True if", "return permission_set_iterator def _get_all_permission_sets_iterator(context: _Context): def permission_set_iterator(target_type, target_id, target_name): if", "ou(\"Name\") value = (*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive)", "permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is filtered: {(permission_set_id, permission_set_name)}\")", "not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal: LOGGER.debug(f\"Principal", "iterables]) return target_iterator else: LOGGER.debug(f\"Iterating for all accounts\") return _get_all_accounts_iterator(context)", "{response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache,", "context.get_permission_set_names: permission_set_name = None else: if permission_set_arn not in context.cache:", "or AWS_OU, and target id. target_filter: A callable taking target", "principal specification or list of principal specifications. A principal specification", "A principal specification or list of principal specifications. 
A principal", "return principal_iterator Assignment = collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\",", "session to use instance_arn (str): The SSO instance to use,", "taking permission set arn and name (name may be None),", "id. target_filter: A callable taking target type, target id, and", "in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount {target_id} page: {response}\") if \"PermissionSets\"", "return _flatten(_process_permission_set(ids, ps) for ps in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn", "targets in assignments. ou_recursive (bool): Set to True if an", "if context.principal: for principal in context.principal: type_matches = (principal[0] is", "if not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\")", "not in filter_cache: filter_cache[key] = func(*args) return filter_cache[key] def _flatten(list_of_lists):", "\"ou_recursive\", \"cache\", \"filter_cache\" ]) def _filter(filter_cache, key, func, args): if", "for ps in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif", "context.cache[principal_key] = None elif principal_type == \"USER\": try: response =", "response[\"DisplayName\"] except aws_error_utils.catch_aws_error(\"ResourceNotFoundException\"): context.cache[principal_key] = None elif principal_type == \"USER\":", "raise TypeError(f\"Invalid target {target}\") elif _is_target_tuple(target): target_type, target_id = target", "\"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\",", "target type {target_type}\") return [(target_type, target_id)] else: value = _flatten(_process_target(t)", "if not 
_filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\")", "account[\"Name\"] value = (\"AWS_ACCOUNT\", account_id, account_name) if not _filter(context.filter_cache, account_id,", "json.loads(v) def fil(*args): print(args) return True kwargs[\"target_filter\"] = fil try:", "func(*args) return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try:", "else: if permission_set_arn not in context.cache: response = sso_admin_client.describe_permission_set( InstanceArn=context.ids.instance_arn,", "context) for t in context.target] def target_iterator(): return itertools.chain(*[it() for", "permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context):", "ou_recursive=False): principal = _process_principal(principal) permission_set = _process_permission_set(ids, permission_set) target =", "permission_set_name): assignment = Assignment( ids.instance_arn, principal_type, principal_id, principal_name, permission_set_arn, permission_set_name,", "and id. 
principal_filter: A callable taking principal type, principal id,", "context = _Context( session = session, ids=ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set,", "of target type, which is either AWS_ACCOUNT or AWS_OU, and", "{principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match principals\") continue", "target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account in accounts:", "_Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps, context) for ps in", "InstanceArn=context.ids.instance_arn, PermissionSetArn=permission_set_arn ) LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if", "_get_ou_iterator(target, context) else: raise TypeError(f\"Invalid target type {target_type}\") def _get_all_accounts_iterator(context:", "The identity store to use if principal names are being", "of target specifications. 
A target specification is an account or", "A callable taking principal type, principal id, and principal name", "itertools.chain(*[it() for it in iterables]) return target_iterator else: LOGGER.debug(f\"Iterating for", "in list_assignments(session, **kwargs): print(\",\".join(v or \"\" for v in value))", "in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names: permission_set_name", "account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name = account[\"Name\"] value =", "re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)] else: raise", "permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names: permission_set_name =", "permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context) for target_type, target_id, target_name", "use instance_arn (str): The SSO instance to use, or it", "context.target] def target_iterator(): return itertools.chain(*[it() for it in iterables]) return", "(principal[0] is None or principal[0] != principal_type) if type_matches and", "import re import numbers import collections import logging from collections.abc", "target_id, target_name) for it in iterables]) return permission_set_iterator else: LOGGER.debug(\"Iterating", "{assignment}\") yield assignment if __name__ == \"__main__\": import boto3 import", "target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator =", "\"\"\"Iterate over AWS SSO assignments. 
Args: session (boto3.Session): boto3 session", "else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does not match principals\") continue principal_key =", "in response[\"AccountAssignments\"]: principal_type = assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal", "= context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response", "target to get all accounts including those in child OUs.", "[\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str), ]) except: return False def _process_target(target):", "LOGGER.debug(f\"DescribePermissionSet response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn,", "permission_set_iterator else: LOGGER.debug(\"Iterating for all permission sets\") return _get_all_permission_sets_iterator(context) def", "= (*target, target_name) if not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account", "organizations_client = context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name =", "return target_iterator def _get_target_iterator(context: _Context): if context.target: iterables = [_get_single_target_iterator(t,", "for principal in context.principal: type_matches = (principal[0] is None or", "elif _is_target_tuple(target): target_type, target_id = target if target_type not in", "_flatten(_process_principal(p) for p in principal) def _process_permission_set(ids, permission_set): if not", "= None if context.get_target_names: organizations_client = context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"]", "in assignments_paginator.paginate( 
InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page:", "names for targets in assignments. ou_recursive (bool): Set to True", "response[\"AccountAssignments\"] and not \"NextToken\" in response: LOGGER.debug(f\"No assignments for {target_id}", "not context.get_permission_set_names: permission_set_name = None else: if permission_set_arn not in", "if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission set", "_process_permission_set(ids, permission_set) target = _process_target(target) cache = {} filter_cache =", "_get_ou_iterator(target, context: _Context): def target_iterator(): target_name = None # if", "permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id,", "accounts including those in child OUs. 
Returns: An iterator over", "Set to True if an OU is provided as a", "return False def _process_target(target): if not target: return None if", "not response[\"AccountAssignments\"] and not \"NextToken\" in response: LOGGER.debug(f\"No assignments for", "format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)]", "principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter, target=target, target_filter=target_filter, get_principal_names=get_principal_names, get_permission_set_names=get_permission_set_names, get_target_names=get_target_names, ou_recursive=ou_recursive, cache=cache,", "type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") identity_store_client = context.session.client(\"identitystore\") assignments_paginator =", "target_iterator def _get_ou_iterator(target, context: _Context): def target_iterator(): target_name = None", "principal id, and principal name (which may be None), and", "target_filter: A callable taking target type, target id, and target", "_process_target(target) cache = {} filter_cache = {} context = _Context(", "account_id = account[\"Id\"] account_name = account[\"Name\"] value = (\"AWS_ACCOUNT\", account_id,", "return itertools.chain(*[it() for it in iterables]) return target_iterator else: LOGGER.debug(f\"Iterating", "def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists)) def _is_principal_tuple(principal): try: return all([ len(principal)", "_filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is filtered: {(permission_set_id,", "None else: if principal_key not in context.cache: if principal_type ==", "and target id. 
target_filter: A callable taking target type, target", "all([ len(principal) == 2, isinstance(principal[0], str), principal[0] in [\"GROUP\", \"USER\"],", "principal_type == \"USER\": try: response = identity_store_client.describe_user( IdentityStoreId=context.ids.identity_store_id, UserId=principal_id )", "\"AWS_OU\": return _get_ou_iterator(target, context) else: raise TypeError(f\"Invalid target type {target_type}\")", "]) except: return False def _process_target(target): if not target: return", "permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names: permission_set_name = None else: if", "get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): \"\"\"Iterate over AWS SSO assignments. Args:", "is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield principal_type, principal_id,", "return target_iterator else: LOGGER.debug(f\"Iterating for all accounts\") return _get_all_accounts_iterator(context) def", "permission set should be included. 
target: A target specification or", "for t in context.target] def target_iterator(): return itertools.chain(*[it() for it", "page: {response}\") if not response[\"AccountAssignments\"] and not \"NextToken\" in response:", "else: LOGGER.debug(f\"Iterating for all accounts\") return _get_all_accounts_iterator(context) def _get_single_permission_set_iterator(permission_set, context:", "else: LOGGER.debug(f\"Visiting single account: {value}\") yield value return target_iterator def", "context: _Context): def target_iterator(): target_name = None # if context.get_target_names:", "yield principal_type, principal_id, principal_name return principal_iterator Assignment = collections.namedtuple(\"Assignment\", [", "func, args): if not func: return True if key not", "LOGGER.debug(f\"Account is filtered: {value}\") continue LOGGER.debug(f\"Visiting account: {value}\") yield value", "ou_recursive=ou_recursive, cache=cache, filter_cache=filter_cache, ) target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context)", "set id {permission_set}\") return [permission_set_arn] def _is_target_tuple(target): try: return all([", "{permission_set}\") return [permission_set_arn] def _is_target_tuple(target): try: return all([ len(target) ==", "[tuple(principal)] else: return _flatten(_process_principal(p) for p in principal) def _process_permission_set(ids,", "if the target should be included. 
get_principal_names (bool): Retrieve names", "principal in context.principal: type_matches = (principal[0] is None or principal[0]", "return permission_set_iterator else: LOGGER.debug(\"Iterating for all permission sets\") return _get_all_permission_sets_iterator(context)", "False def _process_principal(principal): if not principal: return None if isinstance(principal,", "{principal_type}\") principal_name = context.cache[principal_key] if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type,", "raise TypeError(f\"Invalid permission set id {permission_set}\") return [permission_set_arn] def _is_target_tuple(target):", "ids = Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session, ids,", "_Context): target_type = target[0] if target_type == \"AWS_ACCOUNT\": return _get_account_iterator(target,", "in permission_set) if permission_set.startswith(\"arn\"): permission_set_arn = permission_set elif permission_set.startswith(\"ssoins-\") or", "collections.namedtuple(\"Assignment\", [ \"instance_arn\", \"principal_type\", \"principal_id\", \"principal_name\", \"permission_set_arn\", \"permission_set_name\", \"target_type\", \"target_id\",", "LOGGER.debug(\"Iterating for all permission sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context):", "context) elif target_type == \"AWS_OU\": return _get_ou_iterator(target, context) else: raise", "= collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\", \"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\",", "list of the same. 
permission_set_filter: A callable taking permission set", "permission_set_name = context.cache[permission_set_arn] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)):", "filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting permission set: {(permission_set_id, permission_set_name)}\") yield", "not principal: return None if isinstance(principal, str): return [(None, principal)]", "and principal[1] == principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal", "= account[\"Name\"] value = (*target, target_name) if not _filter(context.filter_cache, value[1],", "continue LOGGER.debug(f\"Visiting account: {value}\") yield value return target_iterator def _get_target_iterator(context:", "instance_arn, identity_store_id) return _list_assignments( session, ids, principal=principal, principal_filter=principal_filter, permission_set=permission_set, permission_set_filter=permission_set_filter,", "filter_cache: filter_cache[key] = func(*args) return filter_cache[key] def _flatten(list_of_lists): return list(itertools.chain(*list_of_lists))", "looked up using ListInstances principal: A principal specification or list", "_filter(context.filter_cache, account_id, context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") continue LOGGER.debug(f\"Visiting", "if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) def", "= (*target, target_name) accounts = lookup_accounts_for_ou(context.session, value[1], recursive=context.ou_recursive) for account", "{target_id} page: {response}\") if \"PermissionSets\" not in response: continue for", "if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))] elif re.match(r\"^r-[a-z0-9]{4,32}$\", target) or", "principal: A principal specification or list of principal specifications. 
A", "permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables =", "principal_filter=None, permission_set=None, permission_set_filter=None, target=None, target_filter=None, get_principal_names=False, get_permission_set_names=False, get_target_names=False, ou_recursive=False): principal", "permission_set_name = None else: sso_admin_client = context.session.client(\"sso-admin\") response = sso_admin_client.describe_permission_set(", "def _get_target_iterator(context: _Context): if context.target: iterables = [_get_single_target_iterator(t, context) for", "Retrieve names for permission sets in assignments. get_target_names (bool): Retrieve", "]) def _filter(filter_cache, key, func, args): if not func: return", "TypeError(f\"Invalid permission set id {permission_set}\") return [permission_set_arn] def _is_target_tuple(target): try:", "is filtered: {value}\") else: LOGGER.debug(f\"Visiting single account: {value}\") yield value", "if not _filter(context.filter_cache, principal_key, context.principal_filter, (principal_type, principal_id, principal_name)): if context.principal:", "a 2-tuple of target type, which is either AWS_ACCOUNT or", "name (name may be None), returning True if the permission", "\"\"\" ids = Ids(lambda: session, instance_arn, identity_store_id) return _list_assignments( session,", "{} context = _Context( session = session, ids=ids, principal=principal, principal_filter=principal_filter,", "not in response: continue for permission_set_arn in response[\"PermissionSets\"]: permission_set_id =", "included. 
target: A target specification or list of target specifications.", "to use if principal names are being retrieved or it", "\"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type {target_type}\") sso_admin_client = context.session.client(\"sso-admin\") permission_sets_paginator", "(permission_set_arn, permission_set_name)): LOGGER.debug(f\"Permission set is filtered: {(permission_set_id, permission_set_name)}\") continue LOGGER.debug(f\"Visiting", "Retrieve names for targets in assignments. ou_recursive (bool): Set to", "= None else: raise ValueError(f\"Unknown principal type {principal_type}\") principal_name =", "is an account or OU id, or a 2-tuple of", "up using ListInstances identity_store_id (str): The identity store to use", "permission_set_iterator(target_type, target_id, target_name): for principal_type, principal_id, principal_name in principal_iterator( target_type,", "else: kwargs = json.loads(v) def fil(*args): print(args) return True kwargs[\"target_filter\"]", "\"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\" ]) def _filter(filter_cache, key, func, args):", "principal_name)): if context.principal: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") else: LOGGER.debug(f\"Principal is", "f\"arn:aws:sso:::permissionSet/{ids.instance_id}/{permission_set}\" else: raise TypeError(f\"Invalid permission set id {permission_set}\") return [permission_set_arn]", "response: {response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter,", "all accounts including those in child OUs. 
Returns: An iterator", "target_iterator(): target_name = None # if context.get_target_names: # organizations_client =", "format_account_id(target))] if isinstance(target, str): if re.match(r\"^\\d+$\", target): return [(\"AWS_ACCOUNT\", format_account_id(target))]", "principal {principal_type}:{principal_id}\") if context.principal: for principal in context.principal: type_matches =", "ou_recursive (bool): Set to True if an OU is provided", "assignments_paginator = sso_admin_client.get_paginator(\"list_account_assignments\") for response in assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn):", "None if not isinstance(permission_set, str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids,", "permission set id {permission_set}\") return [permission_set_arn] def _is_target_tuple(target): try: return", "a list of the same. permission_set_filter: A callable taking permission", "response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn, permission_set_name)): LOGGER.debug(f\"Single permission", "return _get_account_iterator(target, context) elif target_type == \"AWS_OU\": return _get_ou_iterator(target, context)", "v in sys.argv[1:]: if hasattr(logging, v): LOGGER.setLevel(getattr(logging, v)) else: kwargs", "up using ListInstances principal: A principal specification or list of", "_Context): def target_iterator(): target_name = None # if context.get_target_names: #", "account[\"Name\"] return target_iterator def _get_single_target_iterator(target, context: _Context): target_type = target[0]", "_is_target_tuple(target): try: return all([ len(target) == 2, isinstance(target[0], str), target[0]", "== \"AWS_OU\": return _get_ou_iterator(target, context) else: raise TypeError(f\"Invalid target type", "use, or it will be looked up using ListInstances identity_store_id", 
"organizations_client = context.session.client(\"organizations\") accounts_paginator = organizations_client.get_paginator(\"list_accounts\") for response in accounts_paginator.paginate():", "being retrieved or it will be looked up using ListInstances", "permission_sets_paginator = sso_admin_client.get_paginator(\"list_permission_sets_provisioned_to_account\") for response in permission_sets_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id): LOGGER.debug(f\"ListPermissionSetsProvisionedToAccount", "be None), returning True if the permission set should be", "{response}\") for account in response[\"Accounts\"]: account_id = account[\"Id\"] account_name =", "account[\"Id\"], account[\"Name\"] return target_iterator def _get_single_target_iterator(target, context: _Context): target_type =", "permission_set_iterator(target_type, target_id, target_name): return itertools.chain(*[it(target_type, target_id, target_name) for it in", "\"USER\"], isinstance(principal[1], str), ]) except: return False def _process_principal(principal): if", "type, target id, and target name (which may be None),", "= permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def permission_set_iterator(target_type, target_id, target_name): if", "v): LOGGER.setLevel(getattr(logging, v)) else: kwargs = json.loads(v) def fil(*args): print(args)", "included. 
permission_set: A permission set arn or id, or a", "organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): # target_name = ou(\"Name\") value =", "principal: return None if isinstance(principal, str): return [(None, principal)] if", "type, principal id, and principal name (which may be None),", "_filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") else: LOGGER.debug(f\"Visiting", "_get_single_permission_set_iterator(permission_set, context: _Context): permission_set_arn = permission_set permission_set_id = permission_set_arn.split(\"/\")[-1] def", "looked up using ListInstances identity_store_id (str): The identity store to", "collections.abc import Iterable import itertools import aws_error_utils from .lookup import", "permission set {(permission_set_id, permission_set_name)}\") yield permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator", "specification or list of principal specifications. 
A principal specification is", "principal id or a 2-tuple of principal type and id.", "LOGGER.debug(f\"DescribePermissionSet response: {response}\") context.cache[permission_set_arn] = response[\"PermissionSet\"][\"Name\"] permission_set_name = context.cache[permission_set_arn] if", "target_name): if not context.get_permission_set_names: permission_set_name = None else: sso_admin_client =", "context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") else: LOGGER.debug(f\"Visiting single account:", "else: LOGGER.debug(f\"Principal is filtered: {principal_type}:{principal_id}\") continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield", "None), returning True if the permission set should be included.", "target_name in target_iterator(): for permission_set_arn, permission_set_id, permission_set_name, in permission_set_iterator(target_type, target_id,", "itertools.chain(*[it(target_type, target_id, target_name) for it in iterables]) return permission_set_iterator else:", "target_id, target_name): for principal_type, principal_id, principal_name in principal_iterator( target_type, target_id,", "print(\",\".join(v or \"\" for v in value)) except KeyboardInterrupt: pass", ".format import format_account_id LOGGER = logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [", "\"principal_filter\", \"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\",", "try: return all([ len(principal) == 2, isinstance(principal[0], str), principal[0] in", "try: session = boto3.Session() print(\",\".join(Assignment._fields)) for value in list_assignments(session, **kwargs):", "context.get_target_names: organizations_client = context.session.client(\"organizations\") account = organizations_client.describe_account(AccountId=target[1])[\"Account\"] if account.get(\"Name\"): target_name", 
"permission_set_arn, permission_set_id, permission_set_name return permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set:", "def permission_set_iterator(target_type, target_id, target_name): if not context.get_permission_set_names: permission_set_name = None", "permission_set_id, permission_set_name): if target_type != \"AWS_ACCOUNT\": raise TypeError(f\"Unsupported target type", "return permission_set_iterator def _get_permission_set_iterator(context: _Context): if context.permission_set: iterables = [_get_single_permission_set_iterator(ps,", "in assignments. ou_recursive (bool): Set to True if an OU", "not _filter(context.filter_cache, value[1], context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") else:", "principal_name = None else: if principal_key not in context.cache: if", "identity_store_id (str): The identity store to use if principal names", "principal should be included. permission_set: A permission set arn or", "assignment[\"PrincipalType\"] principal_id = assignment[\"PrincipalId\"] LOGGER.debug(f\"Visiting principal {principal_type}:{principal_id}\") if context.principal: for", "2, isinstance(target[0], str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str), ])", "account_id, context.target_filter, value): LOGGER.debug(f\"Account is filtered: {value}\") continue LOGGER.debug(f\"Visiting account:", "# ou = organizations_client.describe_organizational_unit(OrganizationalUnitId=target[1])[\"OrganizationalUnit\"] # if ou.get(\"Name\"): # target_name =", "\"permission_set\", \"permission_set_filter\", \"target\", \"target_filter\", \"get_principal_names\", \"get_permission_set_names\", \"get_target_names\", \"ou_recursive\", \"cache\", \"filter_cache\"", "{response}\") permission_set_name = response[\"PermissionSet\"][\"Name\"] if not _filter(context.filter_cache, permission_set_arn, context.permission_set_filter, (permission_set_arn,", "= 
logging.getLogger(__name__) _Context = collections.namedtuple(\"_Context\", [ \"session\", \"ids\", \"principal\", \"principal_filter\",", "target type, which is either AWS_ACCOUNT or AWS_OU, and target", "arn or id, or a list of the same. permission_set_filter:", "for permission_set_arn in response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if not", "response[\"PermissionSets\"]: permission_set_id = permission_set_arn.split(\"/\", 2)[-1] if not context.get_permission_set_names: permission_set_name =", "context.get_principal_names: principal_name = None else: if principal_key not in context.cache:", "assignments_paginator.paginate( InstanceArn=context.ids.instance_arn, AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\")", "def _get_principal_iterator(context: _Context): def principal_iterator( target_type, target_id, target_name, permission_set_arn, permission_set_id,", "context.cache[principal_key] = None else: raise ValueError(f\"Unknown principal type {principal_type}\") principal_name", "A permission set arn or id, or a list of", "in target) return value def _get_account_iterator(target, context: _Context): def target_iterator():", "permission sets\") return _get_all_permission_sets_iterator(context) def _get_principal_iterator(context: _Context): def principal_iterator( target_type,", "AccountId=target_id, PermissionSetArn=permission_set_arn): LOGGER.debug(f\"ListAccountAssignments for {target_id} {permission_set_arn.split('/')[-1]} page: {response}\") if not", "or re.match(r\"^ou-[a-z0-9]{4,32}-[a-z0-9]{8,32}$\", target): return [(\"AWS_OU\", target)] else: raise TypeError(f\"Invalid target", "isinstance(permission_set, str) and isinstance(permission_set, Iterable): return _flatten(_process_permission_set(ids, ps) for ps", "(boto3.Session): boto3 session to use instance_arn (str): The SSO instance", 
"target_name = None if context.get_target_names: organizations_client = context.session.client(\"organizations\") account =", "LOGGER.debug(f\"ListAccounts page: {response}\") for account in response[\"Accounts\"]: account_id = account[\"Id\"]", "return _get_ou_iterator(target, context) else: raise TypeError(f\"Invalid target type {target_type}\") def", "a target to get all accounts including those in child", "lookup_accounts_for_ou from .format import format_account_id LOGGER = logging.getLogger(__name__) _Context =", "response[\"Accounts\"]: account_id = account[\"Id\"] account_name = account[\"Name\"] value = (\"AWS_ACCOUNT\",", ") target_iterator = _get_target_iterator(context) permission_set_iterator = _get_permission_set_iterator(context) principal_iterator = _get_principal_iterator(context)", "An iterator over Assignment namedtuples \"\"\" ids = Ids(lambda: session,", "continue LOGGER.debug(f\"Visiting principal: {principal_type}:{principal_id}\") yield principal_type, principal_id, principal_name return principal_iterator", "SSO instance to use, or it will be looked up", "target_type, target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): assignment = Assignment( ids.instance_arn,", ".lookup import Ids, lookup_accounts_for_ou from .format import format_account_id LOGGER =", "== principal_id: LOGGER.debug(f\"Found principal {principal_type}:{principal_id}\") break else: LOGGER.debug(f\"Principal {principal_type}:{principal_id} does", "isinstance(principal[1], str), ]) except: return False def _process_principal(principal): if not", "target_id, target_name, permission_set_arn, permission_set_id, permission_set_name): if target_type != \"AWS_ACCOUNT\": raise", "== 2, isinstance(target[0], str), target[0] in [\"AWS_OU\", \"AWS_ACCOUNT\"], isinstance(target[1], str)," ]
[ "import ThreadPoolExecutor from service.train import do_train def thread_runner(thread_num, func, *args):", "from concurrent.futures import ThreadPoolExecutor from service.train import do_train def thread_runner(thread_num,", "def thread_runner(thread_num, func, *args): executor = ThreadPoolExecutor(thread_num) f = executor.submit(do_train,", "ThreadPoolExecutor from service.train import do_train def thread_runner(thread_num, func, *args): executor", "thread_runner(thread_num, func, *args): executor = ThreadPoolExecutor(thread_num) f = executor.submit(do_train, *args)", "import do_train def thread_runner(thread_num, func, *args): executor = ThreadPoolExecutor(thread_num) f", "concurrent.futures import ThreadPoolExecutor from service.train import do_train def thread_runner(thread_num, func,", "service.train import do_train def thread_runner(thread_num, func, *args): executor = ThreadPoolExecutor(thread_num)", "threading from concurrent.futures import ThreadPoolExecutor from service.train import do_train def", "do_train def thread_runner(thread_num, func, *args): executor = ThreadPoolExecutor(thread_num) f =", "import threading from concurrent.futures import ThreadPoolExecutor from service.train import do_train", "from service.train import do_train def thread_runner(thread_num, func, *args): executor =", "<reponame>naetimus/bootcamp<filename>solutions/pic_search/webserver/src/service/theardpool.py import threading from concurrent.futures import ThreadPoolExecutor from service.train import" ]
[ "prog=\"buildutil\", description=\"Assembly/C/C++ utility to build embedded systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@')", "== \"build\": if subcommand == \"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\")", "Execute parse_args() args = parser.parse_args() subcommand = parser.parse_args().cmd if args.version", "= os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to build", "True: print(f\"version: {__version__}\") exit(0) # if subcommand is None or", "print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile path: {makefilePath}\")", "absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\",", "\"\"\" parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the config filepath') parser.add_argument(", "import __version__ from configParser import ConfigParser else: from .version import", "utility to build embedded systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v',", "ConfigParser() print(\"fuck\") return # Working directory wd = os.path.abspath(args.directory) print(f\"File:", "# parser.add_argument( # '-f', # '--file', # help='A readable file',", "import ConfigParser else: from .version import __version__ from .configParser import", "'-d', '--directory', type=str, default=cwdPath, help='the config filepath') parser.add_argument( '-v', '--version',", "os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif subcommand == \"get_version\": print(\"version\")", "if subcommand is None or subcommand == \"build\": if subcommand", "readable file', # metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser =", "None or subcommand == \"build\": if subcommand == \"build\": makefilePath", "{makefilePath}\") 
print() command(f\"make -f {makefilePath}\") if __name__ == '__main__': main()", "'--verbose', # action='store_true', # help='an optional argument') \"\"\" parser.add_argument('Path', metavar='path',", "makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif subcommand ==", "import argparse import subprocess if __name__ == '__main__': from version", "version') # Execute parse_args() args = parser.parse_args() subcommand = parser.parse_args().cmd", "import subprocess if __name__ == '__main__': from version import __version__", "directory wd = os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory:", "the project\") parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try to get the", "subcommand == \"build\": if subcommand == \"build\": makefilePath = os.path.join(absFilePath,", "= process.communicate() return stdout, stderr \"\"\" def main(): absFilePath =", "= cmd_parser.add_parser( 'get_version', help=\"try to get the version from git\")", "Working directory wd = os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working", ".configParser import ConfigParser def command(cmd): \"\"\"Run a shell command\"\"\" subprocess.call(cmd,", "'--directory', type=str, default=cwdPath, help='the config filepath') parser.add_argument( '-v', '--version', action='store_true',", "get the version') # Execute parse_args() args = parser.parse_args() subcommand", "configParser import ConfigParser else: from .version import __version__ from .configParser", "# action='store_true', # help='an optional argument') \"\"\" parser.add_argument('Path', metavar='path', type=str,", "stdout, stderr = process.communicate() return stdout, stderr \"\"\" def main():", "'--version', action='store_true', help='get the version of the build system') #", "= parser.parse_args().cmd if args.version is True: print(f\"version: 
{__version__}\") exit(0) #", "ConfigParser def command(cmd): \"\"\"Run a shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\"", "main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser(", "command(f\"make -f {makefilePath}\") elif subcommand == \"get_version\": print(\"version\") else: ConfigParser()", "# help='try to get the version') # Execute parse_args() args", "'build', help=\"build the project\") parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try to", "{__version__}\") exit(0) # if subcommand is None or subcommand ==", "cmd_parser.add_parser( 'get_version', help=\"try to get the version from git\") #", "directory: {wd}\") print(F\"makefile path: {makefilePath}\") print() command(f\"make -f {makefilePath}\") if", "= os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++", "# '-f', # '--file', # help='A readable file', # metavar='FILE',", "os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile path:", "{cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile path: {makefilePath}\") print() command(f\"make -f", "metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build", "'-f', # '--file', # help='A readable file', # metavar='FILE', #", "stdout, stderr \"\"\" def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath =", "ConfigParser else: from .version import __version__ from .configParser import ConfigParser", "cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to", "os import argparse import subprocess if __name__ == '__main__': from", "to build embedded systems\", 
epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose',", "is True: print(f\"version: {__version__}\") exit(0) # if subcommand is None", "shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split = cmd.split() process =", "== '__main__': from version import __version__ from configParser import ConfigParser", "get the version from git\") # parser_get_version.add_argument( # '-a', '--alpha',", "print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile path: {makefilePath}\") print() command(f\"make", "command(cmd): \"\"\"Run a shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split =", "'-v', '--version', action='store_true', help='get the version of the build system')", "filepath') parser.add_argument( '-v', '--version', action='store_true', help='get the version of the", "= os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif subcommand == \"get_version\":", "== \"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif", "the version of the build system') # parser.add_argument( # '-f',", "cmd_parser.add_parser( 'build', help=\"build the project\") parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try", "args.version is True: print(f\"version: {__version__}\") exit(0) # if subcommand is", "print(\"version\") else: ConfigParser() print(\"fuck\") return # Working directory wd =", "print(F\"Working directory: {wd}\") print(F\"makefile path: {makefilePath}\") print() command(f\"make -f {makefilePath}\")", "else: from .version import __version__ from .configParser import ConfigParser def", "\"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config filepath') \"\"\" parser.add_argument(", "= os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile", 
"= subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate()", "version import __version__ from configParser import ConfigParser else: from .version", "\"build\": if subcommand == \"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make", "cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser( 'build', help=\"build the", "parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config filepath') \"\"\" parser.add_argument( '-d',", "print(f\"version: {__version__}\") exit(0) # if subcommand is None or subcommand", "file', # metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd',", "type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser(", "parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser( 'build', help=\"build the project\") parser_get_version", "shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return stdout,", "argparse import subprocess if __name__ == '__main__': from version import", "config filepath') parser.add_argument( '-v', '--version', action='store_true', help='get the version of", "default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser( 'build', help=\"build", "'__main__': from version import __version__ from configParser import ConfigParser else:", "\"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif subcommand == \"get_version\": print(\"version\") else:", "<NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true', # help='an optional", "parser_get_version.add_argument( # '-a', 
'--alpha', # dest='alpha', # help='try to get", "to get the version') # Execute parse_args() args = parser.parse_args()", "action='store_true', # help='an optional argument') \"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath,", "\"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\") elif subcommand", "cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)", "# parser.add_argument('-v', '--verbose', # action='store_true', # help='an optional argument') \"\"\"", "from git\") # parser_get_version.add_argument( # '-a', '--alpha', # dest='alpha', #", "= parser.parse_args() subcommand = parser.parse_args().cmd if args.version is True: print(f\"version:", "elif subcommand == \"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\") return #", "stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return stdout, stderr", "parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try to get the version from", "if subcommand == \"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f", "subprocess if __name__ == '__main__': from version import __version__ from", "'-a', '--alpha', # dest='alpha', # help='try to get the version')", "os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to build embedded", "import __version__ from .configParser import ConfigParser def command(cmd): \"\"\"Run a", "description=\"\") parser_build = cmd_parser.add_parser( 'build', help=\"build the project\") parser_get_version =", "= argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to build embedded systems\", epilog=\"Author:", "print(\"fuck\") return # Working directory wd = os.path.abspath(args.directory) 
print(f\"File: {absFilePath}\")", "parser.add_argument( # '-f', # '--file', # help='A readable file', #", "'get_version', help=\"try to get the version from git\") # parser_get_version.add_argument(", "build system') # parser.add_argument( # '-f', # '--file', # help='A", "help='an optional argument') \"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config", "help='A readable file', # metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser", "return stdout, stderr \"\"\" def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath", "version of the build system') # parser.add_argument( # '-f', #", "or subcommand == \"build\": if subcommand == \"build\": makefilePath =", "help=\"try to get the version from git\") # parser_get_version.add_argument( #", "subcommand = parser.parse_args().cmd if args.version is True: print(f\"version: {__version__}\") exit(0)", "parser.add_argument('-v', '--verbose', # action='store_true', # help='an optional argument') \"\"\" parser.add_argument('Path',", "help='try to get the version') # Execute parse_args() args =", "'--file', # help='A readable file', # metavar='FILE', # type=argparse.FileType('r'), #", "# Execute parse_args() args = parser.parse_args() subcommand = parser.parse_args().cmd if", "parse_args() args = parser.parse_args() subcommand = parser.parse_args().cmd if args.version is", "systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true', #", "subcommand == \"build\": makefilePath = os.path.join(absFilePath, \"conf/make/Makefile\") command(f\"make -f {makefilePath}\")", "from version import __version__ from configParser import ConfigParser else: from", "{makefilePath}\") elif subcommand == \"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\") return", "# Working directory wd = os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: 
{cwdPath}\")", "= parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser( 'build', help=\"build the project\")", "# help='an optional argument') \"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the", "= cmd_parser.add_parser( 'build', help=\"build the project\") parser_get_version = cmd_parser.add_parser( 'get_version',", "from .version import __version__ from .configParser import ConfigParser def command(cmd):", "if __name__ == '__main__': from version import __version__ from configParser", "# default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build = cmd_parser.add_parser( 'build',", "a shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split = cmd.split() process", "type=str, default=cwdPath, help='the config filepath') parser.add_argument( '-v', '--version', action='store_true', help='get", "== \"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\") return # Working directory", "os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility", "def command(cmd): \"\"\"Run a shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split", "type=str, default=cwdPath, help='the config filepath') \"\"\" parser.add_argument( '-d', '--directory', type=str,", "description=\"Assembly/C/C++ utility to build embedded systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') #", "\"\"\" def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser", "project\") parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try to get the version", "embedded systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true',", "from configParser import ConfigParser else: from .version import __version__ 
from", "shell=True) \"\"\" cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE,", "\"\"\" cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE,", ".version import __version__ from .configParser import ConfigParser def command(cmd): \"\"\"Run", "process.communicate() return stdout, stderr \"\"\" def main(): absFilePath = os.path.dirname(os.path.abspath(__file__))", "subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return", "fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true', # help='an optional argument')", "# parser_get_version.add_argument( # '-a', '--alpha', # dest='alpha', # help='try to", "stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr = process.communicate() return stdout, stderr \"\"\"", "epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', # action='store_true', # help='an", "args = parser.parse_args() subcommand = parser.parse_args().cmd if args.version is True:", "default=cwdPath, help='the config filepath') parser.add_argument( '-v', '--version', action='store_true', help='get the", "# type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\") parser_build =", "# dest='alpha', # help='try to get the version') # Execute", "else: ConfigParser() print(\"fuck\") return # Working directory wd = os.path.abspath(args.directory)", "argument') \"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config filepath') \"\"\"", "from .configParser import ConfigParser def command(cmd): \"\"\"Run a shell command\"\"\"", "'--alpha', # dest='alpha', # help='try to get the version') #", "#!/usr/bin/env python3 import os import argparse import subprocess if __name__", "__name__ == 
'__main__': from version import __version__ from configParser import", "dest='alpha', # help='try to get the version') # Execute parse_args()", "version from git\") # parser_get_version.add_argument( # '-a', '--alpha', # dest='alpha',", "default=cwdPath, help='the config filepath') \"\"\" parser.add_argument( '-d', '--directory', type=str, default=cwdPath,", "metavar='path', type=str, default=cwdPath, help='the config filepath') \"\"\" parser.add_argument( '-d', '--directory',", "cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr", "# help='A readable file', # metavar='FILE', # type=argparse.FileType('r'), # default=None)", "= cmd.split() process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout,", "action='store_true', help='get the version of the build system') # parser.add_argument(", "wd = os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\")", "parser.parse_args().cmd if args.version is True: print(f\"version: {__version__}\") exit(0) # if", "optional argument') \"\"\" parser.add_argument('Path', metavar='path', type=str, default=cwdPath, help='the config filepath')", "import ConfigParser def command(cmd): \"\"\"Run a shell command\"\"\" subprocess.call(cmd, shell=True)", "config filepath') \"\"\" parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the config", "parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the config filepath') parser.add_argument( '-v',", "git\") # parser_get_version.add_argument( # '-a', '--alpha', # dest='alpha', # help='try", "parser.parse_args() subcommand = parser.parse_args().cmd if args.version is True: print(f\"version: {__version__}\")", "# '--file', # help='A readable file', # metavar='FILE', # type=argparse.FileType('r'),", "filepath') 
\"\"\" parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the config filepath')", "build embedded systems\", epilog=\"Author: <NAME>\", fromfile_prefix_chars='@') # parser.add_argument('-v', '--verbose', #", "is None or subcommand == \"build\": if subcommand == \"build\":", "to get the version from git\") # parser_get_version.add_argument( # '-a',", "subcommand == \"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\") return # Working", "universal_newlines=True) stdout, stderr = process.communicate() return stdout, stderr \"\"\" def", "return # Working directory wd = os.path.abspath(args.directory) print(f\"File: {absFilePath}\") print(F\"CWD:", "{wd}\") print(F\"makefile path: {makefilePath}\") print() command(f\"make -f {makefilePath}\") if __name__", "# metavar='FILE', # type=argparse.FileType('r'), # default=None) cmd_parser = parser.add_subparsers(dest='cmd', description=\"\")", "__version__ from .configParser import ConfigParser def command(cmd): \"\"\"Run a shell", "\"\"\"Run a shell command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split = cmd.split()", "the version') # Execute parse_args() args = parser.parse_args() subcommand =", "system') # parser.add_argument( # '-f', # '--file', # help='A readable", "# if subcommand is None or subcommand == \"build\": if", "help=\"build the project\") parser_get_version = cmd_parser.add_parser( 'get_version', help=\"try to get", "-f {makefilePath}\") elif subcommand == \"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\")", "stderr = process.communicate() return stdout, stderr \"\"\" def main(): absFilePath", "def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd()) parser =", "process = subprocess.Popen(cmd_split, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True) stdout, stderr =", "the version from git\") # parser_get_version.add_argument( # '-a', '--alpha', #", 
"__version__ from configParser import ConfigParser else: from .version import __version__", "subcommand is None or subcommand == \"build\": if subcommand ==", "stderr \"\"\" def main(): absFilePath = os.path.dirname(os.path.abspath(__file__)) cwdPath = os.path.abspath(os.getcwd())", "import os import argparse import subprocess if __name__ == '__main__':", "help='the config filepath') parser.add_argument( '-v', '--version', action='store_true', help='get the version", "python3 import os import argparse import subprocess if __name__ ==", "help='the config filepath') \"\"\" parser.add_argument( '-d', '--directory', type=str, default=cwdPath, help='the", "the build system') # parser.add_argument( # '-f', # '--file', #", "# '-a', '--alpha', # dest='alpha', # help='try to get the", "argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to build embedded systems\", epilog=\"Author: <NAME>\",", "\"get_version\": print(\"version\") else: ConfigParser() print(\"fuck\") return # Working directory wd", "parser_build = cmd_parser.add_parser( 'build', help=\"build the project\") parser_get_version = cmd_parser.add_parser(", "parser = argparse.ArgumentParser( prog=\"buildutil\", description=\"Assembly/C/C++ utility to build embedded systems\",", "subprocess.call(cmd, shell=True) \"\"\" cmd_split = cmd.split() process = subprocess.Popen(cmd_split, shell=True,", "of the build system') # parser.add_argument( # '-f', # '--file',", "print(F\"makefile path: {makefilePath}\") print() command(f\"make -f {makefilePath}\") if __name__ ==", "exit(0) # if subcommand is None or subcommand == \"build\":", "help='get the version of the build system') # parser.add_argument( #", "parser.add_argument( '-v', '--version', action='store_true', help='get the version of the build", "if args.version is True: print(f\"version: {__version__}\") exit(0) # if subcommand", "command\"\"\" subprocess.call(cmd, shell=True) \"\"\" cmd_split = cmd.split() process = 
subprocess.Popen(cmd_split,", "path: {makefilePath}\") print() command(f\"make -f {makefilePath}\") if __name__ == '__main__':", "{absFilePath}\") print(F\"CWD: {cwdPath}\") print(F\"Working directory: {wd}\") print(F\"makefile path: {makefilePath}\") print()" ]
[ "\"links\" } R = S.get(url=URL, params=PARAMS) DATA = R.json() PAGES", "for details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki API Demos Demo of", "requests S = requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS = {", "#This file is auto-generated. See modules.json and autogenerator.py for details", "R.json() PAGES = DATA[\"query\"][\"pages\"] for k, v in PAGES.items(): for", "and autogenerator.py for details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki API Demos", "\"query\", \"format\": \"json\", \"titles\": \"<NAME>\", \"prop\": \"links\" } R =", "MediaWiki API Demos Demo of `Links` module: Get all links", "See modules.json and autogenerator.py for details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki", "page(s) MIT License \"\"\" import requests S = requests.Session() URL", "= { \"action\": \"query\", \"format\": \"json\", \"titles\": \"<NAME>\", \"prop\": \"links\"", "\"<NAME>\", \"prop\": \"links\" } R = S.get(url=URL, params=PARAMS) DATA =", "Demos Demo of `Links` module: Get all links on the", "\"titles\": \"<NAME>\", \"prop\": \"links\" } R = S.get(url=URL, params=PARAMS) DATA", "the given page(s) MIT License \"\"\" import requests S =", "MIT License \"\"\" import requests S = requests.Session() URL =", "License \"\"\" import requests S = requests.Session() URL = \"https://en.wikipedia.org/w/api.php\"", "R = S.get(url=URL, params=PARAMS) DATA = R.json() PAGES = DATA[\"query\"][\"pages\"]", "is auto-generated. See modules.json and autogenerator.py for details #!/usr/bin/python3 \"\"\"", "all links on the given page(s) MIT License \"\"\" import", "file is auto-generated. 
See modules.json and autogenerator.py for details #!/usr/bin/python3", "= S.get(url=URL, params=PARAMS) DATA = R.json() PAGES = DATA[\"query\"][\"pages\"] for", "PAGES = DATA[\"query\"][\"pages\"] for k, v in PAGES.items(): for l", "modules.json and autogenerator.py for details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki API", "details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki API Demos Demo of `Links`", "import requests S = requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS =", "= DATA[\"query\"][\"pages\"] for k, v in PAGES.items(): for l in", "\"json\", \"titles\": \"<NAME>\", \"prop\": \"links\" } R = S.get(url=URL, params=PARAMS)", "S = requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\":", "DATA[\"query\"][\"pages\"] for k, v in PAGES.items(): for l in v[\"links\"]:", "on the given page(s) MIT License \"\"\" import requests S", "`Links` module: Get all links on the given page(s) MIT", "\"\"\" import requests S = requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS", "given page(s) MIT License \"\"\" import requests S = requests.Session()", "autogenerator.py for details #!/usr/bin/python3 \"\"\" get_links.py MediaWiki API Demos Demo", "} R = S.get(url=URL, params=PARAMS) DATA = R.json() PAGES =", "for k, v in PAGES.items(): for l in v[\"links\"]: print(l[\"title\"])", "get_links.py MediaWiki API Demos Demo of `Links` module: Get all", "\"\"\" get_links.py MediaWiki API Demos Demo of `Links` module: Get", "{ \"action\": \"query\", \"format\": \"json\", \"titles\": \"<NAME>\", \"prop\": \"links\" }", "auto-generated. 
See modules.json and autogenerator.py for details #!/usr/bin/python3 \"\"\" get_links.py", "module: Get all links on the given page(s) MIT License", "URL = \"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\": \"query\", \"format\": \"json\",", "of `Links` module: Get all links on the given page(s)", "#!/usr/bin/python3 \"\"\" get_links.py MediaWiki API Demos Demo of `Links` module:", "Demo of `Links` module: Get all links on the given", "requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\": \"query\", \"format\":", "<filename>python/get_links.py #This file is auto-generated. See modules.json and autogenerator.py for", "\"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\": \"query\", \"format\": \"json\", \"titles\": \"<NAME>\",", "\"prop\": \"links\" } R = S.get(url=URL, params=PARAMS) DATA = R.json()", "params=PARAMS) DATA = R.json() PAGES = DATA[\"query\"][\"pages\"] for k, v", "S.get(url=URL, params=PARAMS) DATA = R.json() PAGES = DATA[\"query\"][\"pages\"] for k,", "\"action\": \"query\", \"format\": \"json\", \"titles\": \"<NAME>\", \"prop\": \"links\" } R", "API Demos Demo of `Links` module: Get all links on", "links on the given page(s) MIT License \"\"\" import requests", "= \"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\": \"query\", \"format\": \"json\", \"titles\":", "= R.json() PAGES = DATA[\"query\"][\"pages\"] for k, v in PAGES.items():", "Get all links on the given page(s) MIT License \"\"\"", "= requests.Session() URL = \"https://en.wikipedia.org/w/api.php\" PARAMS = { \"action\": \"query\",", "\"format\": \"json\", \"titles\": \"<NAME>\", \"prop\": \"links\" } R = S.get(url=URL,", "DATA = R.json() PAGES = DATA[\"query\"][\"pages\"] for k, v in", "PARAMS = { \"action\": \"query\", \"format\": \"json\", \"titles\": \"<NAME>\", \"prop\":" ]
[ "os import re import readline # Allows easier file input", "not short_name + '.com' == file_name: raise SyntaxError('problem interpreting file", "the scratch directory before running the job. `chk_file` must be", "'--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program", "num_cores: Number of cores to request :param str time: Amount", "-c \"from gautools.tools import ' 'use_gen_template as ugt;\\n' 'from thtools", "commandline arguments import datetime import glob # Allows referencing file", "used from other arguments to this function. out_file will be", "readline # Allows easier file input (with tab completion?) import", "script for (Gaussian) jobs for submission to queue If make_xyz", "'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which this job should", "sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$", "# # You may obtain a copy of the License", "a file to pass to obabel to be used to", "2: # ignore first two lines # number of atoms", "run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE >", "'r') as templ_file: if verbose: print('opened {}'.format(template)) for line in", "argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c', '--numcores', type=int,", "i < 2: # ignore first two lines # number", "import datetime import glob # Allows referencing file system/file names", "shall I use?') _in_name_list = [rlinput('file name: ', base_name)] return", "load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) +", "rel_dir + '/' else: rel_dir = '' f_name = path", "load openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if 
make_input:", "gets changed after job submission); mem will be mem; and", "base name for linking the output file to the current", "Not sure if this works in 3.5+ because raw_input is", "help='Number of cores for job') # I should probably check", "return rel_dir, f_name def create_gau_input(coord_name, template, verbose=True): \"\"\" make gaussian", "in_name in in_names: out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if", "of atoms # continue # # XYZ files created by", "default=None, help='base name for linking output to cwd while '", "i, line in enumerate(in_file): if i < 2: # ignore", "much because it just won't # submit the job and", "checked to exist first to make sure to not waste", "3 because it should be good to # be working", "\"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF", "thtools import cd, make_obj_dir, save_obj, resolve_path yes = ['y', 'yes',", "chk_file: If not None, this file will be copied back", "rel_dir = '' f_name = path return rel_dir, f_name def", "Job on which this job should depend. This should be", "hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write submission script for", "name of the written file :rtype: str \"\"\" if verbose:", "type=str, default='g09', help='name of executable to run') parser.add_argument('-b', '--batch', action='store_true',", "if __name__ == '__main__': description = 'Create and submit a", "if num_files > 1: print('Multiple files starting with {}'.format(base_name)) if", "be chk_file. :return: The name of the script file :rtype:", "should be good to # be working on the newest", "obtain a copy of the License at # # #", "for linking the output file to the current directory. 
If", "else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp", "works in 3.5+ because raw_input is gone def rlinput(prompt, prefill=''):", "make_obj_dir, save_obj, resolve_path yes = ['y', 'yes', '1'] # An", "args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template:", "None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n')", "SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file')", "sfw('\\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was copied", "line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\\n\\n\\n')", "verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {} to files to possibly", "completed. If this is not None and make_input is True,", "gaussian input file by combining header and coordinates files This", "True else: print('What file name shall I use?') _in_name_list =", "name of file with header for Gaussian calculation (up to", "will give quick feedback about that? parser.add_argument('-t', '--time', help='Time required", "the header for the desired calculation (including charge and multiplicity),", "would # like to make sure everything input gets a", "in templ_file: out_file.write(line) if '\\n' not in line: out_file.write('\\n') with", "verbose: If True, some status messages will be printed (including", "'/' in path: rel_dir, f_name = path.rsplit('/', 1) rel_dir =", "function. 
out_file will be input_name; xyz will be xyz or", "{}'.format(file_name)) out_name = short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1)", "[] for in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time,", "dif. from num names given') job_info = submit_scripts(script_list, args.batch, args.submit,", "print('created Gaussian input file {}'.format(_out_name)) return _out_name def get_input_files(base_name, batch):", "length and the # second sort won't do anything. if", "used to create an xyz file to pass to use_gen_template.", "xyz file to use as input to use_gen_template (if make_input", "with the License. # # You may obtain a copy", "otherwise would # put 1,10,11,... as opposed to 1,...,9,10,... #", "was written by <NAME> in 2015. # # <EMAIL> <EMAIL>", "the job. `chk_file` must be not None as well. :param", "with {}'.format(base_name)) if input('Did you mean to execute a batch", "rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def", "submitting commands to the shell from warnings import warn from", "chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write", "_dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't", "if chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running", "See the License for the specific language governing permissions and", "exist first to make sure to not waste time when", "get_input_files(base_name, batch): _in_name_list = glob.glob(base_name + '*') _in_name_list.sort() # sort", "None, this will be the base name for linking the", "written file :rtype: str \"\"\" if verbose: print('Creating Gaussian input", "import glob # Allows referencing file system/file names import os", "'yes', '1'] # An input 
function that can prefill in", "input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores for", "not None; nproc will be $NSLOTS (useful if this gets", "If make_xyz is not None, the file make_xyz will be", "jobs submitted, but scripts created') else: if submit or input('submit", "newest version of python. from __future__ import print_function import argparse", "else: if verbose: print('{} not submitted'.format(scripts)) _job_info = [' '.join(output.split('", ":param str chk_file: If not None, this file will be", "job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp'", "not None: sfw('# ') if not copy_chk else None sfw('cp", "1)[0] + '.com' with open(_out_name, 'w') as out_file: with open(template,", "coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true',", "None: n_xyz = temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl =", "bool copy_chk: If this is True, the script will attempt", "file to be written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy", "-l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$ -o", "xyz: Name of an xyz file to use as input", "return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None,", "the written file :rtype: str \"\"\" if verbose: print('Creating Gaussian", "submit or input('submit job {}? 
'.format(scripts[0])) in yes: rd, f", "+ '*') _in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len) # sort", "sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if ln_running is not None:", "not use this file except in compliance with the License.", "print('What file name shall I use?') _in_name_list = [rlinput('file name:", "you may not use this file except in compliance with", "Gaussian can read :param str template: name of file with", "-l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$ -m", "if '\\n' not in line: out_file.write('\\n') with open(coord_name, 'r') as", "xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write submission script for (Gaussian)", "out_file will be input_name; xyz will be xyz or a", "# # you may not use this file except in", "to work with python 3 because it should be good", "create input for the Gaussian calculation. :param dict ugt_dict: dict", "print('script written to {}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False, submit=False,", "action='store_true', help='Copy check file to the scratch directory') parser.add_argument('-l', '--ln_running',", "copied to $CURRENTDIR\\n\\n') if verbose: print('script written to {}'.format(_script_name)) return", "as templ_file: if verbose: print('opened {}'.format(template)) for line in templ_file:", "in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was copied to $CURRENTDIR\\n\\n') if", "submit a script to run a Gaussian job on SCC'", "to pass to obabel to be used to create an", "else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if ln_running is", "make_input is True, this will also be passed to use_gen_template.", "file by combining header and coordinates files This function takes", "{}\\n\\n'.format(hold_jid)) if make_xyz is not None: sfw('if [ ! 
-f", "\"\"\" rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0]", "verbose=True): \"\"\" make gaussian input file by combining header and", "'.com' :param str coord_name: name of file with coordinates in", "input file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name,", "Allows referencing file system/file names import os import re import", "if make_input: sfw('python -c \"from gautools.tools import ' 'use_gen_template as", "short_name + '.com' == file_name: raise SyntaxError('problem interpreting file name.", "# # # This script was written by <NAME> in", "rel_dir, f_name def create_gau_input(coord_name, template, verbose=True): \"\"\" make gaussian input", "sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file", "file to pass to obabel to be used to create", "header for the desired calculation (including charge and multiplicity), returns", "in_file: if verbose: print('opened {}'.format(coord_name)) for i, line in enumerate(in_file):", "get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) +", "= temp_xyz[:-4] if ugt_dict is not None: make_obj_dir() pkl_path =", "time when missing a necessary input file. :param str input_name:", "return _script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs = []", "this will also be passed to use_gen_template. :param bool copy_chk:", "# # the first line is the number of atoms", "to create input for the Gaussian calculation. :param dict ugt_dict:", "the number of atoms # continue # # XYZ files", "verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False,", "opposed to 1,...,9,10,... # if number 01,02,... 
They should all", "existing checkpoint file to the scratch directory before running the", "# # This script was written by <NAME> in 2015.", "# XYZ files created by mathematica have a comment #", "file names) :return: name of the written file :rtype: str", "= os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name", "everything input gets a script and all the # script", "this job should depend') args = parser.parse_args() in_name_list, args.batch =", "chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list):", "is not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is not", "try: return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated.", "Number of cores to request :param str time: Amount of", "openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python", "to execute a batch job? ') in yes: batch =", "# Don't really know how this works. Copied from #", "sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE", "ignore first two lines # number of atoms and the", "limitations under the License. # # # ######################################################################## # This", "sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n')", "on which this job should depend') args = parser.parse_args() in_name_list,", "file_name file_name = short_name + '.com' print('Assuming input file is", "written by <NAME> in 2015. 
# # <EMAIL> <EMAIL> #", "specific language governing permissions and # # limitations under the", "IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "governing permissions and # # limitations under the License. #", "submit or input('submit all jobs? ') in yes: for script", "# like to make sure everything input gets a script", "args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template, in_name_list,", "{} ]; then\\n'.format( os.path.abspath(make_xyz)) + ' exit 17\\n' 'fi\\n\\n') sfw('module", "copy of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0", "# as the second line saying something like: # #", "line saying something like: # # \"Created by mathematica\". Obv.", "y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid is not None: sfw('#$", "Obv. want to ignore that # if line.strip().startswith('Create') or #", "ending with '.com' :param str coord_name: name of file with", "print('No jobs submitted, but scripts created') else: if submit or", ":param str template: name of file with header for Gaussian", "if this gets changed after job submission); mem will be", "output to cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job", "of file with coordinates in a format Gaussian can read", "made_name_list.append(out_name) if verbose: print('Added {} to files to possibly submit.'.format(out_name))", "the shell from warnings import warn from thtools import cd,", "templ_file: out_file.write(line) if '\\n' not in line: out_file.write('\\n') with open(coord_name,", "then\\n'.format( os.path.abspath(make_xyz)) + ' exit 17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n')", "be working on the newest version of python. 
from __future__", "file for creating input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically", "Name of the file to use as input :param int", "job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and args.nojobinfo:", "of molecular coordinates (the form should not matter, it will", "# # \"Created by mathematica\". Obv. want to ignore that", "file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not", "out_file, xyz, nproc, mem, or checkpoint because those will all", "a script to run a Gaussian job on SCC' parser", "# # # Licensed under the Apache License, Version 2.0", "to in writing, software # # distributed under the License", "express or # # implied. # # See the License", "None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file", "hold_jid: Job on which this job should depend. This should", "path: rel_dir, f_name = path.rsplit('/', 1) rel_dir = rel_dir +", "If this is not None and make_input is True, this", "# be working on the newest version of python. from", "include out_file, xyz, nproc, mem, or checkpoint because those will", "create_gau_input(coord_name, template, verbose=True): \"\"\" make gaussian input file by combining", "to # be working on the newest version of python.", "creates a Gaussian input file ending with '.com' :param str", "a Gaussian job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name", "sure to not waste time when missing a necessary input", "if batch: if submit or input('submit all jobs? ') in", "dict of arguments to pass to use_gen_template. 
This should not", "under the Apache License, Version 2.0 (the \"License\"); # #", "= parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list", "verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the submitted job", "be $NSLOTS (useful if this gets changed after job submission);", "and args.nojobinfo: for job in job_info: print(job) if args.verbose: print('Done.", "other arguments to this function. out_file will be input_name; xyz", "'Create and submit a script to run a Gaussian job", "output was copied to $CURRENTDIR\\n\\n') if verbose: print('script written to", "script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n')", "as I know, but I would # like to make", "/net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if", "sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is not None: sfw('cp", "= script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$ -M", "copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if ln_running", "with header for Gaussian calculation (up to and including the", "if ln_running is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE", "not short_name + '.' + input_extension == file_name: raise SyntaxError('problem", "= True else: print('What file name shall I use?') _in_name_list", "know how this works. 
Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python", "that # if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue #", "universal_newlines=True) output = process.communicate()[0] if verbose: print(output) outputs.append(output) else: if", "just won't # submit the job and it will give", "as in_file: if verbose: print('opened {}'.format(coord_name)) for i, line in", "not waste time when missing a necessary input file. :param", "mathematica have a comment # # as the second line", "mem: int or str :param mem: Minimum amount of memory", "def use_template(template, in_names, verbose): made_name_list = [] for in_name in", "out_file.write('\\n\\n\\n') if verbose: print('created Gaussian input file {}'.format(_out_name)) return _out_name", "# # Unless required by applicable law or agreed to", "in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template,", "_job_info if __name__ == '__main__': description = 'Create and submit", "copied back after the job has completed. If this is", "the specific language governing permissions and # # limitations under", "directory before running the job. `chk_file` must be not None", "should probably check validity of this time request # Maybe", "Allows easier file input (with tab completion?) import subprocess #", "executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\"", "be used from other arguments to this function. 
out_file will", "batch=False, submit=False, verbose=False): outputs = [] if batch: if submit", "time-based name if make_xyz is not None; nproc will be", "-s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not", "if hold_jid is not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz", "directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for linking output", "if not short_name + '.' + input_extension == file_name: raise", "num names given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if", "from num names given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose)", "= resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is not None:", "prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path):", "_in_name_list = glob.glob(base_name + '*') _in_name_list.sort() # sort files alphanumerically", "to exist first to make sure to not waste time", "created') else: if submit or input('submit job {}? '.format(scripts[0])) in", "get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list", "atoms # continue # # XYZ files created by mathematica", "\"Created by mathematica\". Obv. want to ignore that # if", "the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for", "will be $NSLOTS (useful if this gets changed after job", "import re import readline # Allows easier file input (with", "to this function. 
out_file will be input_name; xyz will be", "= use_template(args.template, in_name_list, args.verbose) script_list = [] for in_name in", "while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which this", "return _in_name_list, batch def use_template(template, in_names, verbose): made_name_list = []", "to be submitted. raise IOError('num scripts dif. from num names", "# # # Copyright 2015 <NAME> IV # # #", "sfw('\\n') if ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is", "else: print('What file name shall I use?') _in_name_list = [rlinput('file", "I use?') _in_name_list = [rlinput('file name: ', base_name)] return _in_name_list,", "submission to queue If make_xyz is not None, the file", "format 'hh:mm:ss' :param bool verbose: If True, print out some", "in file_name: short_name, input_extension = os.path.splitext(file_name) if not short_name +", "is not None; nproc will be $NSLOTS (useful if this", "'w') as script_file: sfw = script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe", "len(job_name) == 0: job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh')", "Gaussian input file ending with '.com' :param str coord_name: name", "written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to", "writing, software # # distributed under the License is distributed", "script was written by <NAME> in 2015. # # <EMAIL>", "that should be the header for the desired calculation (including", "multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template file for", "None or make_xyz is not None: n_xyz = temp_xyz else:", "time: Amount of time to request in the format 'hh:mm:ss'", "make_input is True). 
:param str make_xyz: The name of a", "from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose',", "sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if", "use as input to use_gen_template (if make_input is True). :param", "the first line is the number of atoms # continue", "Unless required by applicable law or agreed to in writing,", "if submit or input('submit all jobs? ') in yes: for", "by length (because otherwise would # put 1,10,11,... as opposed", "be the same length and the # second sort won't", "exit 17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel", "if verbose: print('{} not submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4])", "changed after job submission); mem will be mem; and checkpoint", "in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n')", "of cores to request :param str time: Amount of time", "str template: name of file with header for Gaussian calculation", "from other arguments to this function. out_file will be input_name;", "(if make_input is True). :param str make_xyz: The name of", "ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was copied to $CURRENTDIR\\n\\n')", "# # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required", "f] # Don't really know how this works. Copied from", "should be the header for the desired calculation (including charge", "missing a necessary input file. :param str input_name: Name of", "if not short_name + '.com' == file_name: raise SyntaxError('problem interpreting", "chk_file. 
:return: The name of the script file :rtype: str", "+ '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0:", "'**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else:", "executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) ==", "chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo", "sure everything input gets a script and all the #", "the title/comment continue # if line.strip().isdigit(): # # the first", "running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if", "be good to # be working on the newest version", "if chk_file is not None: sfw('# ') if not copy_chk", "f_name = path.rsplit('/', 1) rel_dir = rel_dir + '/' else:", "None: sfw('if [ ! -f {} ]; then\\n'.format( os.path.abspath(make_xyz)) +", "this will be the base name for linking the output", "$OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n')", "Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com' with", "if number 01,02,... They should all be the same length", "= ['qsub', f] # Don't really know how this works.", "number of atoms # continue # # XYZ files created", "want to ignore that # if line.strip().startswith('Create') or # line.strip().startswith('generated'):", "like: # # \"Created by mathematica\". Obv. want to ignore", "easier file input (with tab completion?) import subprocess # Allows", "# limitations under the License. 
# # # ######################################################################## #", "checkpoint file to the scratch directory before running the job.", "cores for job') # I should probably check validity of", "form should not matter, it will just be copied into", "be checked to exist first to make sure to not", "name of another job in the queuing system. :param str", "applicable law or agreed to in writing, software # #", "An input function that can prefill in the text entry", "None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n')", "job_info and args.nojobinfo: for job in job_info: print(job) if args.verbose:", "not return the submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint", "short_name + '.out' elif '.' in file_name: short_name, input_extension =", "line.strip().isdigit(): # # the first line is the number of", "job should depend. This should be the name of another", "file with header for Gaussian calculation (up to and including", "for Gaussian calculation (up to and including the charge and", "# # implied. # # See the License for the", "and such :type mem: int or str :param mem: Minimum", "written to work with python 3 because it should be", "job in the queuing system. :param str xyz: Name of", "'from thtools import load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n' 'd =", "num_files = len(_in_name_list) if num_files > 1: print('Multiple files starting", "be linked with the same base name. 
:param str hold_jid:", "obabel to be used to create an xyz file to", "return _job_info if __name__ == '__main__': description = 'Create and", "and creates a Gaussian input file ending with '.com' :param", "should be the name of another job in the queuing", "be printed (including file names) :return: name of the written", "in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk,", "never be the case as far as I know, but", "program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the", "use_template(args.template, in_name_list, args.verbose) script_list = [] for in_name in in_name_list:", "is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None:", ":param str time: Amount of time to request in the", "help='create multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template file", "mem: Minimum amount of memory to request :param str executable:", "-s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is not", "sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is not None: sfw('cp $CHECKFILE", "None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran in /net/`hostname", "like to make sure everything input gets a script and", "in the format 'hh:mm:ss' :param bool verbose: If True, print", "and all the # script names are there to be", "will just be copied into the next file) and a", "language governing permissions and # # limitations under the License.", "to the current directory. 
If chk_file is not None, it", "if verbose: print('opened {}'.format(coord_name)) for i, line in enumerate(in_file): if", "be the header for the desired calculation (including charge and", "write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None,", "-hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is not None: sfw('if [ !", "{}'.format(base_name)) if input('Did you mean to execute a batch job?", "$CURRENTDIR\\n\\n') if verbose: print('script written to {}'.format(_script_name)) return _script_name def", "_dir_and_file(path): warn('_dir_and_file is deprecated. Use os.path.split instead', DeprecationWarning) if '/'", "if verbose: print('script written to {}'.format(_script_name)) return _script_name def submit_scripts(scripts,", "name. :param str hold_jid: Job on which this job should", "an xyz file to pass to use_gen_template. :param bool make_input:", "{} to files to possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort()", "if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name + '.com'", "+ ' exit 17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module load", "be written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file", "create an xyz file to pass to use_gen_template. :param bool", "given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and", "chk_file is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE '", "should depend. This should be the name of another job", "You may obtain a copy of the License at #", "input :param int num_cores: Number of cores to request :param", "a format Gaussian can read :param str template: name of", "str xyz: Name of an xyz file to use as", "read :param str template: name of file with header for", "has completed. 
If this is not None and make_input is", "to request :param str time: Amount of time to request", "system. :param str xyz: Name of an xyz file to", "parser.add_argument('-x', '--template', default=None, help='template file for creating input from coords')", "args.verbose) script_list = [] for in_name in in_name_list: script_name =", "first line is the number of atoms # continue #", "= [' '.join(output.split(' ')[2:4]) for output in outputs] return _job_info", "else: sfw('\\n') if ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file", "the file to use as input :param int num_cores: Number", "is not None, it will also be linked with the", "will be chk_file. :return: The name of the script file", "set of molecular coordinates (the form should not matter, it", "parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if args.template: in_name_list =", "import argparse # For parsing commandline arguments import datetime import", "/net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not None: sfw('ln", "#! /usr/bin/env python3 ######################################################################## # # # This script was", "job)') parser.add_argument('-x', '--template', default=None, help='template file for creating input from", "into the next file) and a template file that should", "work with python 3 because it should be good to", "names given') job_info = submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info", "nproc, mem, or checkpoint because those will all be used", "as opposed to 1,...,9,10,... # if number 01,02,... They should", ":param bool verbose: If True, print out some status messages", "input for the Gaussian calculation. 
:param dict ugt_dict: dict of", "first to make sure to not waste time when missing", "file name shall I use?') _in_name_list = [rlinput('file name: ',", "print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com'", "to make sure to not waste time when missing a", "dict ugt_dict: dict of arguments to pass to use_gen_template. This", "the name of another job in the queuing system. :param", ":rtype: str \"\"\" if verbose: print('Creating Gaussian input file...') _out_name", "'g09', 'g16' :param str chk_file: If not None, this file", "file except in compliance with the License. # # You", "sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$", "not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n')", "in_name_list, args.verbose) script_list = [] for in_name in in_name_list: script_name", "This script was written by <NAME> in 2015. # #", "sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$", "line in enumerate(in_file): if i < 2: # ignore first", "batch: if submit or input('submit all jobs? ') in yes:", "to run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE", "[' '.join(output.split(' ')[2:4]) for output in outputs] return _job_info if", "eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name))", "not matter, it will just be copied into the next", "job and it will give quick feedback about that? parser.add_argument('-t',", "yes: rd, f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl =", "yes: for script in scripts: rd, f = _dir_and_file(script) with", "+ '.out' elif '.' 
in file_name: short_name, input_extension = os.path.splitext(file_name)", ".\\n\\n') else: sfw('\\n') if ln_running is not None: sfw('ln -s", "output in outputs] return _job_info if __name__ == '__main__': description", "file to the current directory. If chk_file is not None,", "the script file :rtype: str \"\"\" rel_dir, file_name = os.path.split(input_name)", "type=int, default=16, help='Number of cores for job') # I should", "'--chk_file', default=None, help='checkpoint file to be written and copied back')", "name: ', base_name)] return _in_name_list, batch def use_template(template, in_names, verbose):", "from warnings import warn from thtools import cd, make_obj_dir, save_obj,", "run a Gaussian job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name',", "be mem; and checkpoint will be chk_file. :return: The name", "possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def", "input function that can prefill in the text entry #", "submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs = [] if batch: if", "to use_gen_template. 
:param bool copy_chk: If this is True, the", "is {}'.format(file_name)) out_name = short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)',", "')[2:4]) for output in outputs] return _job_info if __name__ ==", "parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c',", "is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n')", "(useful if this gets changed after job submission); mem will", "job') # I should probably check validity of this time", "') in yes: for script in scripts: rd, f =", "Amount of time to request in the format 'hh:mm:ss' :param", "This is written to work with python 3 because it", "or make_xyz is not None: n_xyz = temp_xyz else: n_xyz", "readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated. Use os.path.split instead', DeprecationWarning)", "if submit or input('submit job {}? '.format(scripts[0])) in yes: rd,", "to use for the job Example, 'g09', 'g16' :param str", "as input a file with a set of molecular coordinates", "sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c", "I know, but I would # like to make sure", "sfw('module load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath(", "use_gen_template (if make_input is True). :param str make_xyz: The name", "(Gaussian) jobs for submission to queue If make_xyz is not", "# if number 01,02,... They should all be the same", "well. 
:param str ln_running: If not None, this will be", "None, it will also be linked with the same base", "base_name)] return _in_name_list, batch def use_template(template, in_names, verbose): made_name_list =", "not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = '' with", "made_name_list = [] for in_name in in_names: out_name = create_gau_input(in_name,", "an xyz file to use as input to use_gen_template (if", "= [] for in_name in in_names: out_name = create_gau_input(in_name, template,", "far as I know, but I would # like to", "os.path.abspath(n_xyz))) if make_input: sfw('python -c \"from gautools.tools import ' 'use_gen_template", "ignore that # if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue", "more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the submitted", "in writing, software # # distributed under the License is", "file name?') out_name = short_name + '.out' else: short_name =", "outputs.append(output) else: if verbose: print('No jobs submitted, but scripts created')", "else: if verbose: print('No jobs submitted, but scripts created') else:", "continue # if line.strip().isdigit(): # # the first line is", "to the scratch directory before running the job. `chk_file` must", "created by mathematica have a comment # # as the", "http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output =", "if verbose: print('Added {} to files to possibly submit.'.format(out_name)) _in_name_list", "file system/file names import os import re import readline #", "something like: # # \"Created by mathematica\". Obv. 
want to", "files to possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return", "None, the file make_xyz will be checked to exist first", "' + 'Period in file name?') out_name = short_name +", "elif '.' in file_name: short_name, input_extension = os.path.splitext(file_name) if not", "back after the job has completed. If this is not", "_in_name_list, batch def use_template(template, in_names, verbose): made_name_list = [] for", "if chk_file is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE", "not None, the file make_xyz will be checked to exist", "will be printed (including file names) :return: name of the", "really know how this works. Copied from # http://stackoverflow.com/questions/4256107/ #", "as out_file: with open(template, 'r') as templ_file: if verbose: print('opened", "template file that should be the header for the desired", "required by applicable law or agreed to in writing, software", "nproc will be $NSLOTS (useful if this gets changed after", "by mathematica\". Obv. want to ignore that # if line.strip().startswith('Create')", "# # # # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless", "['y', 'yes', '1'] # An input function that can prefill", "scripts dif. from num names given') job_info = submit_scripts(script_list, args.batch,", "with a set of molecular coordinates (the form should not", "is not None: n_xyz = temp_xyz else: n_xyz = resolve_path(xyz)", "anything. if not batch: num_files = len(_in_name_list) if num_files >", "\"\"\" make gaussian input file by combining header and coordinates", "check file to the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None,", "of atoms and the title/comment continue # if line.strip().isdigit(): #", "int or str :param mem: Minimum amount of memory to", "of arguments to pass to use_gen_template. 
This should not include", "verbose: print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0] +", "True, this will also be passed to use_gen_template. :param bool", "input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated. Use os.path.split", "distributed under the License is distributed on an \"AS IS\"", "{}.log\\n\\n'.format(short_name)) if hold_jid is not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if", "next file) and a template file that should be the", "batch: num_files = len(_in_name_list) if num_files > 1: print('Multiple files", "not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if", "the newest version of python. from __future__ import print_function import", "to request :param str executable: Executable file to use for", "default=None, help='checkpoint file to be written and copied back') parser.add_argument('--copy_chk',", "def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook()", "or str :param mem: Minimum amount of memory to request", "help='base name for linking output to cwd while ' 'running')", "input file by combining header and coordinates files This function", "parser.add_argument('-t', '--time', help='Time required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str,", ":param bool verbose: If True, some status messages will be", "is written to work with python 3 because it should", "else: short_name = file_name file_name = short_name + '.com' print('Assuming", "how this works. Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process", "of python. 
from __future__ import print_function import argparse # For", "at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # # #", "can prefill in the text entry # Not sure if", "if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is", "to pass to use_gen_template. :param bool make_input: If True, use_gen_template", "print out some status messages and such :type mem: int", "a script and all the # script names are there", "the base name for linking the output file to the", "coordinates in a format Gaussian can read :param str template:", "$SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file is not None: sfw('#", "as input to use_gen_template (if make_input is True). :param str", "file to use for the job Example, 'g09', 'g16' :param", "all jobs? ') in yes: for script in scripts: rd,", "know, but I would # like to make sure everything", "checkpoint because those will all be used from other arguments", "['qsub', f] # Don't really know how this works. Copied", "_out_name def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name + '*') _in_name_list.sort()", "def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None,", "import ' 'use_gen_template as ugt;\\n' 'from thtools import load_obj, get_node_mem;\\n'", "not None: sfw('if [ ! -f {} ]; then\\n'.format( os.path.abspath(make_xyz))", "about that? parser.add_argument('-t', '--time', help='Time required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e',", "a batch job? 
') in yes: batch = True else:", "and the title/comment continue # if line.strip().isdigit(): # # the", "those will all be used from other arguments to this", "linking output to cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None,", "scripts: rd, f = _dir_and_file(script) with cd(rd, ignore_blank=True): cl =", "License for the specific language governing permissions and # #", "mean to execute a batch job? ') in yes: batch", "= path.rsplit('/', 1) rel_dir = rel_dir + '/' else: rel_dir", "None, this file will be copied back after the job", "write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name)", "None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running))", "-s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not None: sfw('ln -s", "# \"Created by mathematica\". Obv. want to ignore that #", "won't do anything. if not batch: num_files = len(_in_name_list) if", "os.path.splitext(file_name)[0] if not short_name + '.com' == file_name: raise SyntaxError('problem", "sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n')", "use_template(template, in_names, verbose): made_name_list = [] for in_name in in_names:", "the License is distributed on an \"AS IS\" BASIS, #", "the job and it will give quick feedback about that?", "KIND, either express or # # implied. 
# # See", "in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file,", "make_input: sfw('python -c \"from gautools.tools import ' 'use_gen_template as ugt;\\n'", "all be the same length and the # second sort", "yes = ['y', 'yes', '1'] # An input function that", "chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running is", "os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name))", "parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return the submitted job information')", "make_xyz: The name of a file to pass to obabel", "args.submit, args.verbose) if job_info and args.nojobinfo: for job in job_info:", "'' f_name = path return rel_dir, f_name def create_gau_input(coord_name, template,", "= get_input_files(args.in_name, args.batch) if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose)", "use_gen_template. :param bool make_input: If True, use_gen_template will be used", "of an xyz file to use as input to use_gen_template", "= create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {} to", "use_gen_template. This should not include out_file, xyz, nproc, mem, or", "file name?') out_name = short_name + '.out' elif '.' in", "else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is not None:", "of the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 #", "1) rel_dir = rel_dir + '/' else: rel_dir = ''", "current directory. 
If chk_file is not None, it will also", "gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally:", "may obtain a copy of the License at # #", "for i, line in enumerate(in_file): if i < 2: #", "make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write submission script for (Gaussian) jobs", "continue # else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created Gaussian input", "arguments import datetime import glob # Allows referencing file system/file", "should be an existing checkpoint file to the scratch directory", "# number of atoms and the title/comment continue # if", "not None, this will be the base name for linking", "num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None,", "will be mem; and checkpoint will be chk_file. :return: The", "ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list): # This", "matter, it will just be copied into the next file)", "after job submission); mem will be mem; and checkpoint will", "Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE,", "sfw('\\n\\n') if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file", "cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which", "may not use this file except in compliance with the", "there to be submitted. raise IOError('num scripts dif. 
from num", "# For parsing commandline arguments import datetime import glob #", "sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n')", "in scripts: rd, f = _dir_and_file(script) with cd(rd, ignore_blank=True): cl", "pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is not None: chk_line", "sfw('echo About to run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n')", "outputs = [] if batch: if submit or input('submit all", "input file. :param str input_name: Name of the file to", "quick feedback about that? parser.add_argument('-t', '--time', help='Time required as \"hh:mm:ss\"',", "$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is not", "# # Licensed under the Apache License, Version 2.0 (the", "job {}? '.format(scripts[0])) in yes: rd, f = _dir_and_file(scripts[0]) with", "= coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w') as out_file:", "{} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable))", "use_gen_template. :param bool copy_chk: If this is True, the script", "including the charge and multiplicity) :param bool verbose: If True,", "n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is not", "'.out' elif '.' 
in file_name: short_name, input_extension = os.path.splitext(file_name) if", "depend') args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch) if", "that can prefill in the text entry # Not sure", "name for linking output to cwd while ' 'running') parser.add_argument('-d',", "'w') as out_file: with open(template, 'r') as templ_file: if verbose:", "status messages and such :type mem: int or str :param", "scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for linking", ":param str make_xyz: The name of a file to pass", "load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n')", "shell from warnings import warn from thtools import cd, make_obj_dir,", "time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None,", "# This should never be the case as far as", "is not None: sfw('# ') if not copy_chk else None", "mem; and checkpoint will be chk_file. 
:return: The name of", "-b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About to", "help='Copy check file to the scratch directory') parser.add_argument('-l', '--ln_running', type=str,", "of the file to use as input :param int num_cores:", "input file ending with '.com' :param str coord_name: name of", "name for linking the output file to the current directory.", "atoms and the title/comment continue # if line.strip().isdigit(): # #", "amount of memory to request :param str executable: Executable file", "parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores for job') #", "'r') as in_file: if verbose: print('opened {}'.format(coord_name)) for i, line", "default=16, help='Number of cores for job') # I should probably", "the Apache License, Version 2.0 (the \"License\"); # # you", "or # line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\\n\\n\\n') if", "queuing system. :param str xyz: Name of an xyz file", "and # # limitations under the License. # # #", "a template file that should be the header for the", "to $CURRENTDIR\\n\\n') if verbose: print('script written to {}'.format(_script_name)) return _script_name", "out_name = short_name + '.out' else: short_name = file_name file_name", "would # put 1,10,11,... as opposed to 1,...,9,10,... # if", "job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name =", "/net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About to run", "short_name).group(1) if len(job_name) == 0: job_name = 'default' _script_name =", "if chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n')", "with python 3 because it should be good to #", "job? 
') in yes: batch = True else: print('What file", "+ 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not", "of another job in the queuing system. :param str xyz:", "names) :return: name of the written file :rtype: str \"\"\"", "str \"\"\" rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name =", "charge and multiplicity), returns the name of the file, and", "will all be used from other arguments to this function.", "rd, f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl = ['qsub',", "and submit a script to run a Gaussian job on", "'d = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line)", "'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is", "time request # Maybe it doesn't matter so much because", "copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the scratch", "not None, it will also be linked with the same", "== len(in_name_list): # This should never be the case as", "sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was copied to", "validity of this time request # Maybe it doesn't matter", "chk_file is not None, it will also be linked with", "if verbose: print('created Gaussian input file {}'.format(_out_name)) return _out_name def", "coord_name: name of file with coordinates in a format Gaussian", "it will just be copied into the next file) and", "verbose: print('{} not submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4]) for", "depend. 
This should be the name of another job in", "If not None, this file will be copied back after", "coordinates files This function takes as input a file with", "queue If make_xyz is not None, the file make_xyz will", "the desired calculation (including charge and multiplicity), returns the name", "About to run {} in /net/`'.format(executable) + 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{}", "# # # Unless required by applicable law or agreed", "be not None as well. :param str ln_running: If not", "make gaussian input file by combining header and coordinates files", "file, and creates a Gaussian input file ending with '.com'", "not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running is not None:", "files This function takes as input a file with a", "######################################################################## # This is written to work with python 3", "in yes: rd, f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl", "readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is", "= 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = '' with open(_script_name, 'w') as", "time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not", "file is {}'.format(file_name)) out_name = short_name + '.out' job_name =", "__future__ import print_function import argparse # For parsing commandline arguments", "the format 'hh:mm:ss' :param bool verbose: If True, print out", "f_name def create_gau_input(coord_name, template, verbose=True): \"\"\" make gaussian input file", "short_name + '.' 
+ input_extension == file_name: raise SyntaxError('problem interpreting", "ln_running is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE '", "script_list.append(script_name) if not len(script_list) == len(in_name_list): # This should never", "input gets a script and all the # script names", "# # as the second line saying something like: #", "base name. :param str hold_jid: Job on which this job", "$CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if ln_running is not None: sfw('ln", "if ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not", "args.batch) if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list =", "file_name = short_name + '.com' print('Assuming input file is {}'.format(file_name))", "'--nojobinfo', action='store_false', help='Do not return the submitted job information') parser.add_argument('-k',", "is not None: sfw('if [ ! -f {} ]; then\\n'.format(", "file. :param str input_name: Name of the file to use", "in 3.5+ because raw_input is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda:", "the job has completed. If this is not None and", "ignore_blank=True): cl = ['qsub', f] # Don't really know how", "out_name = short_name + '.out' elif '.' in file_name: short_name,", "with open(_script_name, 'w') as script_file: sfw = script_file.write sfw('#!/bin/bash -l\\n\\n')", "file) and a template file that should be the header", "parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false',", "mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name))", "os.path.split instead', DeprecationWarning) if '/' in path: rel_dir, f_name =", "this works. 
Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process =", "bool verbose: If True, some status messages will be printed", "chk_file is not None: sfw('# ') if not copy_chk else", "output = process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose:", "args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list = [] for", "verbose=False): outputs = [] if batch: if submit or input('submit", "raise SyntaxError('problem interpreting file name. ' + 'Period in file", "warnings import warn from thtools import cd, make_obj_dir, save_obj, resolve_path", "Minimum amount of memory to request :param str executable: Executable", "default='g09', help='name of executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create", "because those will all be used from other arguments to", "not len(script_list) == len(in_name_list): # This should never be the", "_dir_and_file(script) with cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't", "01,02,... They should all be the same length and the", ":param str coord_name: name of file with coordinates in a", "short_name + '.out' else: short_name = file_name file_name = short_name", "sfw('#$ -m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$", "file :rtype: str \"\"\" rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'):", "(because otherwise would # put 1,10,11,... as opposed to 1,...,9,10,...", "used to create input for the Gaussian calculation. 
:param dict", "will attempt to copy what should be an existing checkpoint", "wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz)))", "DeprecationWarning) if '/' in path: rel_dir, f_name = path.rsplit('/', 1)", "Name of an xyz file to use as input to", "'/' else: rel_dir = '' f_name = path return rel_dir,", "the submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to", "templ_file: if verbose: print('opened {}'.format(template)) for line in templ_file: out_file.write(line)", ":param str xyz: Name of an xyz file to use", "os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name +", "None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is not None: sfw('if", "will also be linked with the same base name. :param", "2.0 (the \"License\"); # # you may not use this", "\"\"\" if verbose: print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.',", "name of file with coordinates in a format Gaussian can", "copied into the next file) and a template file that", "The name of a file to pass to obabel to", "file with a set of molecular coordinates (the form should", "sfw('\\n') if ln_running is not None: sfw('ln -s -b /net/`hostname", "is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running))", "= file_name file_name = short_name + '.com' print('Assuming input file", "-pe omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$", "# ######################################################################## # This is written to work with python", "else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created Gaussian input file {}'.format(_out_name))", "to request in the format 'hh:mm:ss' :param bool verbose: If", "Use 
os.path.split instead', DeprecationWarning) if '/' in path: rel_dir, f_name", "scripts created') else: if submit or input('submit job {}? '.format(scripts[0]))", "second line saying something like: # # \"Created by mathematica\".", "as far as I know, but I would # like", "= subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose: print(output)", "compliance with the License. # # You may obtain a", "script and all the # script names are there to", "from __future__ import print_function import argparse # For parsing commandline", "n_xyz = temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4]", "help='name of executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple", "the file, and creates a Gaussian input file ending with", "cores to request :param str time: Amount of time to", "to 1,...,9,10,... # if number 01,02,... They should all be", "\"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable to", "# second sort won't do anything. 
if not batch: num_files", "line: out_file.write('\\n') with open(coord_name, 'r') as in_file: if verbose: print('opened", "is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt)", "= os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None", "sfw('#$ -M <EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$", "with coordinates in a format Gaussian can read :param str", "the License for the specific language governing permissions and #", "for line in templ_file: out_file.write(line) if '\\n' not in line:", "the file make_xyz will be checked to exist first to", "# Allows referencing file system/file names import os import re", "'--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do", "the job Example, 'g09', 'g16' :param str chk_file: If not", "None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if ln_running is not", "sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if", "to obabel to be used to create an xyz file", "_in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16,", "saying something like: # # \"Created by mathematica\". Obv. want", "Allows for submitting commands to the shell from warnings import", "and it will give quick feedback about that? parser.add_argument('-t', '--time',", "# the first line is the number of atoms #", "finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated. Use os.path.split instead',", "<NAME> in 2015. # # <EMAIL> <EMAIL> # # #", "python. from __future__ import print_function import argparse # For parsing", "mathematica\". Obv. 
want to ignore that # if line.strip().startswith('Create') or", "f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl = ['qsub', f]", "_in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len) # sort by length", "True, the script will attempt to copy what should be", "for the Gaussian calculation. :param dict ugt_dict: dict of arguments", "ln_running is not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not None:", "mem will be mem; and checkpoint will be chk_file. :return:", "process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose:", "that? parser.add_argument('-t', '--time', help='Time required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable',", "file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if not short_name + '.com' ==", "bool verbose: If True, print out some status messages and", "parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be written and copied", "to possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list", "# script names are there to be submitted. 
raise IOError('num", "the case as far as I know, but I would", "'.com' with open(_out_name, 'w') as out_file: with open(template, 'r') as", "XYZ files created by mathematica have a comment # #", "file...') _out_name = coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w')", "creating input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?')", "= glob.glob(base_name + '*') _in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len)", "will be checked to exist first to make sure to", "str make_xyz: The name of a file to pass to", "# Copyright 2015 <NAME> IV # # # # Licensed", "help='job on which this job should depend') args = parser.parse_args()", "charge and multiplicity) :param bool verbose: If True, some status", "chk_file is not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line =", "sort by length (because otherwise would # put 1,10,11,... as", "# distributed under the License is distributed on an \"AS", "# implied. # # See the License for the specific", "template, verbose=True): \"\"\" make gaussian input file by combining header", "of time to request in the format 'hh:mm:ss' :param bool", "# Unless required by applicable law or agreed to in", "be copied into the next file) and a template file", "file to the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base", "to the shell from warnings import warn from thtools import", "-O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c \"from gautools.tools", "files starting with {}'.format(base_name)) if input('Did you mean to execute", "<EMAIL> <EMAIL> # # # # Copyright 2015 <NAME> IV", "the charge and multiplicity) :param bool verbose: If True, some", "2015. 
# # <EMAIL> <EMAIL> # # # # Copyright", "sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n')", "or input('submit all jobs? ') in yes: for script in", "# sort files alphanumerically _in_name_list.sort(key=len) # sort by length (because", "pass to obabel to be used to create an xyz", "title/comment continue # if line.strip().isdigit(): # # the first line", "this job should depend. This should be the name of", "or input('submit job {}? '.format(scripts[0])) in yes: rd, f =", "<EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem))", "to be used to create an xyz file to pass", "# An input function that can prefill in the text", "-M <EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l", "what should be an existing checkpoint file to the scratch", "import subprocess # Allows for submitting commands to the shell", "permissions and # # limitations under the License. 
# #", "takes as input a file with a set of molecular", "if verbose: print(output) outputs.append(output) else: if verbose: print('No jobs submitted,", "OR CONDITIONS OF ANY KIND, either express or # #", "'.com' print('Assuming input file is {}'.format(file_name)) out_name = short_name +", "number of atoms and the title/comment continue # if line.strip().isdigit():", "out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created Gaussian input file {}'.format(_out_name)) return", "= process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('{}", "in line: out_file.write('\\n') with open(coord_name, 'r') as in_file: if verbose:", "batch): _in_name_list = glob.glob(base_name + '*') _in_name_list.sort() # sort files", "verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list)", "number 01,02,... They should all be the same length and", "'.' + input_extension == file_name: raise SyntaxError('problem interpreting file name.", "for output in outputs] return _job_info if __name__ == '__main__':", ":return: name of the written file :rtype: str \"\"\" if", "f_name = path return rel_dir, f_name def create_gau_input(coord_name, template, verbose=True):", "import print_function import argparse # For parsing commandline arguments import", "should never be the case as far as I know,", "print('Multiple files starting with {}'.format(base_name)) if input('Did you mean to", "for submission to queue If make_xyz is not None, the", "verbose: print(output) outputs.append(output) else: if verbose: print('No jobs submitted, but", "sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is not None:", "it just won't # submit the job and it will", "scratch directory before running the job. 
`chk_file` must be not", "file ending with '.com' :param str coord_name: name of file", ".\\n') if chk_file is not None: sfw('# ') if not", ":param str hold_jid: Job on which this job should depend.", "-b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not None:", "two lines # number of atoms and the title/comment continue", "file with coordinates in a format Gaussian can read :param", "do anything. if not batch: num_files = len(_in_name_list) if num_files", "str ln_running: If not None, this will be the base", "enumerate(in_file): if i < 2: # ignore first two lines", "have a comment # # as the second line saying", "if i < 2: # ignore first two lines #", "some status messages and such :type mem: int or str", "make_xyz is not None, the file make_xyz will be checked", "If this is True, the script will attempt to copy", "of executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts", "xyz file to pass to use_gen_template. :param bool make_input: If", "import load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path))", "file make_xyz will be checked to exist first to make", ":param str ln_running: If not None, this will be the", "verbose: print('created Gaussian input file {}'.format(_out_name)) return _out_name def get_input_files(base_name,", "+ 'hostname -s`$SCRATCHDIR\\n\\n') sfw('{} <$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running", "it will give quick feedback about that? 
parser.add_argument('-t', '--time', help='Time", "in outputs] return _job_info if __name__ == '__main__': description =", "/net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was copied to $CURRENTDIR\\n\\n') if verbose:", "matter so much because it just won't # submit the", "len(_in_name_list) if num_files > 1: print('Multiple files starting with {}'.format(base_name))", "or checkpoint because those will all be used from other", "jobs for submission to queue If make_xyz is not None,", "yes: batch = True else: print('What file name shall I", "# running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0]", "works. Copied from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl,", "under the License is distributed on an \"AS IS\" BASIS,", "same base name. :param str hold_jid: Job on which this", "probably check validity of this time request # Maybe it", "action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not", "<NAME> IV # # # # Licensed under the Apache", "but I would # like to make sure everything input", "the License. # # You may obtain a copy of", "file to pass to use_gen_template. :param bool make_input: If True,", "this file except in compliance with the License. # #", "commands to the shell from warnings import warn from thtools", "-j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid is not None:", "information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be written and", "input('Did you mean to execute a batch job? ') in", "all be used from other arguments to this function. 
out_file", "sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid is not None: sfw('#$ -hold_jid", "executable to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch", "# # ######################################################################## # This is written to work with", "Apache License, Version 2.0 (the \"License\"); # # you may", "script_list = [] for in_name in in_name_list: script_name = write_sub_script(input_name=in_name,", "len(script_list) == len(in_name_list): # This should never be the case", "arguments to this function. out_file will be input_name; xyz will", "= [rlinput('file name: ', base_name)] return _in_name_list, batch def use_template(template,", "re import readline # Allows easier file input (with tab", "== 0: job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz", "out_file: with open(template, 'r') as templ_file: if verbose: print('opened {}'.format(template))", "short_name, input_extension = os.path.splitext(file_name) if not short_name + '.' 
+", "args.verbose) if job_info and args.nojobinfo: for job in job_info: print(job)", "parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable to run') parser.add_argument('-b',", "' exit 17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n')", "'--executable', type=str, default='g09', help='name of executable to run') parser.add_argument('-b', '--batch',", "submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4]) for output in outputs]", "python 3 because it should be good to # be", "= write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid)", "str coord_name: name of file with coordinates in a format", "if job_info and args.nojobinfo: for job in job_info: print(job) if", "= len(_in_name_list) if num_files > 1: print('Multiple files starting with", "job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be written", "ugt_dict: dict of arguments to pass to use_gen_template. This should", "' 'use_gen_template as ugt;\\n' 'from thtools import load_obj, get_node_mem;\\n' 'm", "for in_name in in_names: out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name)", "This should not include out_file, xyz, nproc, mem, or checkpoint", "starting with {}'.format(base_name)) if input('Did you mean to execute a", "the # script names are there to be submitted. 
raise", "If True, some status messages will be printed (including file", "def create_gau_input(coord_name, template, verbose=True): \"\"\" make gaussian input file by", "name if make_xyz is not None; nproc will be $NSLOTS", "sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo", "help='Name of Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number", "is not None and make_input is True, this will also", "path return rel_dir, f_name def create_gau_input(coord_name, template, verbose=True): \"\"\" make", "'.join(output.split(' ')[2:4]) for output in outputs] return _job_info if __name__", "'.format(scripts[0])) in yes: rd, f = _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True):", "if ugt_dict is not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl)", "xyz, nproc, mem, or checkpoint because those will all be", "the License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # #", "should not include out_file, xyz, nproc, mem, or checkpoint because", "$CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output", "make_xyz is not None: n_xyz = temp_xyz else: n_xyz =", "to use_gen_template. This should not include out_file, xyz, nproc, mem,", "(up to and including the charge and multiplicity) :param bool", "# Maybe it doesn't matter so much because it just", "# See the License for the specific language governing permissions", "= 'Create and submit a script to run a Gaussian", "function that can prefill in the text entry # Not", "Copyright 2015 <NAME> IV # # # # Licensed under", "= path return rel_dir, f_name def create_gau_input(coord_name, template, verbose=True): \"\"\"", "arguments to pass to use_gen_template. 
This should not include out_file,", "# http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable", "{}'.format(_out_name)) return _out_name def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name +", "to use as input :param int num_cores: Number of cores", "1: print('Multiple files starting with {}'.format(base_name)) if input('Did you mean", "pass to use_gen_template. This should not include out_file, xyz, nproc,", "written to {}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False):", "ugt_dict is not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if", "' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About to run {} in", "of memory to request :param str executable: Executable file to", "# This script was written by <NAME> in 2015. #", "if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue # else: out_file.write(line)", "= short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name)", "chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = '' with open(_script_name, 'w')", "[rlinput('file name: ', base_name)] return _in_name_list, batch def use_template(template, in_names,", "this is not None and make_input is True, this will", "batch = True else: print('What file name shall I use?')", "so much because it just won't # submit the job", "2015 <NAME> IV # # # # Licensed under the", "ANY KIND, either express or # # implied. 
# #", "if '/' in path: rel_dir, f_name = path.rsplit('/', 1) rel_dir", "{}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time))", "str chk_file: If not None, this file will be copied", "sfw('#$ -j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid is not", "alphanumerically _in_name_list.sort(key=len) # sort by length (because otherwise would #", "The name of the script file :rtype: str \"\"\" rel_dir,", "header and coordinates files This function takes as input a", "Licensed under the Apache License, Version 2.0 (the \"License\"); #", "a time-based name if make_xyz is not None; nproc will", "= os.path.splitext(file_name) if not short_name + '.' + input_extension ==", "copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list): #", "# # distributed under the License is distributed on an", "by mathematica have a comment # # as the second", "help='make program more verbose') parser.add_argument('-j', '--nojobinfo', action='store_false', help='Do not return", ":param int num_cores: Number of cores to request :param str", "= os.path.splitext(file_name)[0] if not short_name + '.com' == file_name: raise", "for submitting commands to the shell from warnings import warn", "was copied to $CURRENTDIR\\n\\n') if verbose: print('script written to {}'.format(_script_name))", "= load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) +", "'--hold_jid', default=None, help='job on which this job should depend') args", "make_input: If True, use_gen_template will be used to create input", "format Gaussian can read :param str template: name of file", "and multiplicity), returns the name of the file, and creates", "calculation. 
:param dict ugt_dict: dict of arguments to pass to", "args.nojobinfo: for job in job_info: print(job) if args.verbose: print('Done. Completed", "sfw('# ') if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n')", "stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose: print(output) outputs.append(output) else:", "by <NAME> in 2015. # # <EMAIL> <EMAIL> # #", "str \"\"\" if verbose: print('Creating Gaussian input file...') _out_name =", "is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p", "+ '.com' print('Assuming input file is {}'.format(file_name)) out_name = short_name", "+ '.com' == file_name: raise SyntaxError('problem interpreting file name. '", "= _dir_and_file(scripts[0]) with cd(rd, ignore_blank=True): cl = ['qsub', f] #", "chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir", "on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input", "3.5+ because raw_input is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill))", "open(_out_name, 'w') as out_file: with open(template, 'r') as templ_file: if", "[] for in_name in in_names: out_name = create_gau_input(in_name, template, verbose=verbose)", "They should all be the same length and the #", "to the scratch directory') parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name", "be the name of another job in the queuing system.", "short_name = os.path.splitext(file_name)[0] if not short_name + '.com' == file_name:", "Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores", "completion?) 
import subprocess # Allows for submitting commands to the", "h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j y\\n')", "'--ln_running', type=str, default=None, help='base name for linking output to cwd", "(with tab completion?) import subprocess # Allows for submitting commands", "this works in 3.5+ because raw_input is gone def rlinput(prompt,", "system/file names import os import re import readline # Allows", "outputs] return _job_info if __name__ == '__main__': description = 'Create", "to ignore that # if line.strip().startswith('Create') or # line.strip().startswith('generated'): #", "if verbose: print('opened {}'.format(template)) for line in templ_file: out_file.write(line) if", "instead', DeprecationWarning) if '/' in path: rel_dir, f_name = path.rsplit('/',", "as input :param int num_cores: Number of cores to request", "use as input :param int num_cores: Number of cores to", "input_name: Name of the file to use as input :param", "import os import re import readline # Allows easier file", "if len(job_name) == 0: job_name = 'default' _script_name = os.path.join(rel_dir,", "in enumerate(in_file): if i < 2: # ignore first two", "make_input=False, ugt_dict=None): \"\"\" Write submission script for (Gaussian) jobs for", "interpreting file name. ' + 'Period in file name?') out_name", "name. 
' + 'Period in file name?') out_name = short_name", "'$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About to run {} in /net/`'.format(executable)", "submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and args.nojobinfo: for job", "submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name,", "made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False,", "time to request in the format 'hh:mm:ss' :param bool verbose:", "this is True, the script will attempt to copy what", "= [] if batch: if submit or input('submit all jobs?", "save_obj(ugt_dict, temp_pkl) if chk_file is not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file)", "load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz),", "in 2015. # # <EMAIL> <EMAIL> # # # #", "License at # # # # http://www.apache.org/licenses/LICENSE-2.0 # # #", "True, use_gen_template will be used to create input for the", ":param dict ugt_dict: dict of arguments to pass to use_gen_template.", "rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name = os.path.splitext(file_name)[0] if", "template: name of file with header for Gaussian calculation (up", "working on the newest version of python. 
from __future__ import", "-m eas\\n') sfw('#$ -l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N", "short_name = file_name file_name = short_name + '.com' print('Assuming input", "'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not None:", "# # # ######################################################################## # This is written to work", "such :type mem: int or str :param mem: Minimum amount", "mem, or checkpoint because those will all be used from", "the name of the file, and creates a Gaussian input", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n')", "just be copied into the next file) and a template", "If not None, this will be the base name for", "parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x', '--template',", "CONDITIONS OF ANY KIND, either express or # # implied.", "omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$ -m eas\\n') sfw('#$ -l", "is distributed on an \"AS IS\" BASIS, # # WITHOUT", "sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if", "submit=False, verbose=False): outputs = [] if batch: if submit or", "because raw_input is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try:", "17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {}", ":rtype: str \"\"\" rel_dir, file_name = os.path.split(input_name) if file_name.endswith('.com'): short_name", "temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is", "glob # Allows referencing file system/file names import os import", "# 
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "submitted. raise IOError('num scripts dif. from num names given') job_info", "') in yes: batch = True else: print('What file name", "# http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output", "$CURRENTDIR/.\\n') if chk_file is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else:", "else: sfw('\\n') if ln_running is not None: sfw('ln -s -b", "datetime import glob # Allows referencing file system/file names import", "$CURRENTDIR/$INPUTFILE .\\n') if chk_file is not None: sfw('# ') if", "cd, make_obj_dir, save_obj, resolve_path yes = ['y', 'yes', '1'] #", "raise IOError('num scripts dif. from num names given') job_info =", "not None, this file will be copied back after the", "input('submit job {}? '.format(scripts[0])) in yes: rd, f = _dir_and_file(scripts[0])", "implied. # # See the License for the specific language", "to be written and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check", "ln_running: If not None, this will be the base name", "action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template", "law or agreed to in writing, software # # distributed", "-s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About to run {}", "also be passed to use_gen_template. 
:param bool copy_chk: If this", "in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable,", "open(_script_name, 'w') as script_file: sfw = script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$", "(including charge and multiplicity), returns the name of the file,", "help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose')", "molecular coordinates (the form should not matter, it will just", "lines # number of atoms and the title/comment continue #", "if not batch: num_files = len(_in_name_list) if num_files > 1:", "make sure everything input gets a script and all the", "if make_xyz is not None; nproc will be $NSLOTS (useful", "because it should be good to # be working on", "copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write submission", "# # Copyright 2015 <NAME> IV # # # #", "an existing checkpoint file to the scratch directory before running", "> $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT')", "= short_name + '.out' elif '.' in file_name: short_name, input_extension", "sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if", "desired calculation (including charge and multiplicity), returns the name of", "'Period in file name?') out_name = short_name + '.out' elif", "job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian", "if not len(script_list) == len(in_name_list): # This should never be", "as script_file: sfw = script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp", "in compliance with the License. 
# # You may obtain", "# submit the job and it will give quick feedback", "with cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't really", "# you may not use this file except in compliance", "'\\n' not in line: out_file.write('\\n') with open(coord_name, 'r') as in_file:", "', base_name)] return _in_name_list, batch def use_template(template, in_names, verbose): made_name_list", "\"from gautools.tools import ' 'use_gen_template as ugt;\\n' 'from thtools import", "required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of", "job. `chk_file` must be not None as well. :param str", "# # # # Copyright 2015 <NAME> IV # #", "sort won't do anything. if not batch: num_files = len(_in_name_list)", "{}'.format(template)) for line in templ_file: out_file.write(line) if '\\n' not in", "default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable to run')", "prefill in the text entry # Not sure if this", "jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j', '--nojobinfo',", "case as far as I know, but I would #", "tab completion?) import subprocess # Allows for submitting commands to", "with the same base name. :param str hold_jid: Job on", "I would # like to make sure everything input gets", "can read :param str template: name of file with header", "# # XYZ files created by mathematica have a comment", "get_node_mem;\\n' 'm = get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format(", "+ datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None or make_xyz", "line in templ_file: out_file.write(line) if '\\n' not in line: out_file.write('\\n')", "If chk_file is not None, it will also be linked", "deprecated. 
Use os.path.split instead', DeprecationWarning) if '/' in path: rel_dir,", "not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else: sfw('\\n') if", "the second line saying something like: # # \"Created by", "before running the job. `chk_file` must be not None as", "waste time when missing a necessary input file. :param str", "_in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09',", "good to # be working on the newest version of", "is not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran", "path.rsplit('/', 1) rel_dir = rel_dir + '/' else: rel_dir =", "{}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs =", "'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz", "continue # # XYZ files created by mathematica have a", "$OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if", "Gaussian calculation (up to and including the charge and multiplicity)", "run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x',", "ln_running is not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not", "int num_cores: Number of cores to request :param str time:", "be xyz or a time-based name if make_xyz is not", "save_obj, resolve_path yes = ['y', 'yes', '1'] # An input", "is True, this will also be passed to use_gen_template. :param", "sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n')", "{}? '.format(scripts[0])) in yes: rd, f = _dir_and_file(scripts[0]) with cd(rd,", "Don't really know how this works. 
Copied from # http://stackoverflow.com/questions/4256107/", "except in compliance with the License. # # You may", "of a file to pass to obabel to be used", "# # http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by", "sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if chk_file is", "comment # # as the second line saying something like:", "line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created", "agreed to in writing, software # # distributed under the", "None; nproc will be $NSLOTS (useful if this gets changed", "be used to create an xyz file to pass to", "type=str, default=None, help='base name for linking output to cwd while", "+ '.com' with open(_out_name, 'w') as out_file: with open(template, 'r')", "submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file to be", "for creating input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit", "None: sfw('# ') if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE", "job has completed. If this is not None and make_input", "def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name + '*') _in_name_list.sort() #", "'Period in file name?') out_name = short_name + '.out' else:", "OF ANY KIND, either express or # # implied. 
#", "ugt_dict=None): \"\"\" Write submission script for (Gaussian) jobs for submission", "(including file names) :return: name of the written file :rtype:", "= argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c', '--numcores',", "of the script file :rtype: str \"\"\" rel_dir, file_name =", "make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is not None:", "{}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid is", "= '' with open(_script_name, 'w') as script_file: sfw = script_file.write", "= ['y', 'yes', '1'] # An input function that can", "to not waste time when missing a necessary input file.", "= made_name_list _in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00',", "Gaussian input file {}'.format(_out_name)) return _out_name def get_input_files(base_name, batch): _in_name_list", "python3 ######################################################################## # # # This script was written by", "not None: sfw('ln -s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else:", "in in_names: out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose:", "os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None or", "name of a file to pass to obabel to be", "software # # distributed under the License is distributed on", "_out_name = coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w') as", "on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR", "is None or make_xyz is not None: n_xyz = temp_xyz", "import cd, make_obj_dir, save_obj, resolve_path yes = ['y', 'yes', '1']", "name?') out_name = short_name + '.out' elif '.' in file_name:", "the License. 
# # # ######################################################################## # This is written", "temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict", "print_function import argparse # For parsing commandline arguments import datetime", "not None and make_input is True, this will also be", "not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n') if", "must be not None as well. :param str ln_running: If", "_script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') +", "second sort won't do anything. if not batch: num_files =", "file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file", "function takes as input a file with a set of", "http://www.apache.org/licenses/LICENSE-2.0 # # # # Unless required by applicable law", "if this works in 3.5+ because raw_input is gone def", "if line.strip().isdigit(): # # the first line is the number", "# continue # else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created Gaussian", "subprocess # Allows for submitting commands to the shell from", "for job in job_info: print(job) if args.verbose: print('Done. Completed normally.')", "{}'.format(coord_name)) for i, line in enumerate(in_file): if i < 2:", "with open(template, 'r') as templ_file: if verbose: print('opened {}'.format(template)) for", "+ '.out' else: short_name = file_name file_name = short_name +", "to pass to use_gen_template. 
This should not include out_file, xyz,", "-o {}.log\\n\\n'.format(short_name)) if hold_jid is not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid))", "calculation (including charge and multiplicity), returns the name of the", "in_names, verbose): made_name_list = [] for in_name in in_names: out_name", "out_file.write('\\n') with open(coord_name, 'r') as in_file: if verbose: print('opened {}'.format(coord_name))", "the output file to the current directory. If chk_file is", "in yes: for script in scripts: rd, f = _dir_and_file(script)", "<$INPUTFILE > $OUTPUTFILE'.format(executable)) sfw('\\n\\n') if ln_running is not None: sfw('rm", "verbose: print('opened {}'.format(template)) for line in templ_file: out_file.write(line) if '\\n'", "os.path.splitext(file_name) if not short_name + '.' + input_extension == file_name:", "in a format Gaussian can read :param str template: name", "file :rtype: str \"\"\" if verbose: print('Creating Gaussian input file...')", "not None: n_xyz = temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl", "License is distributed on an \"AS IS\" BASIS, # #", "= process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('No", "'hh:mm:ss' :param bool verbose: If True, print out some status", "0: job_name = 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz =", "sfw('if [ ! -f {} ]; then\\n'.format( os.path.abspath(make_xyz)) + '", "None and make_input is True, this will also be passed", "file input (with tab completion?) import subprocess # Allows for", "to use_gen_template (if make_input is True). :param str make_xyz: The", "to queue If make_xyz is not None, the file make_xyz", "on the newest version of python. 
from __future__ import print_function", "make_xyz is not None; nproc will be $NSLOTS (useful if", "submitted, but scripts created') else: if submit or input('submit job", "script file :rtype: str \"\"\" rel_dir, file_name = os.path.split(input_name) if", "License, Version 2.0 (the \"License\"); # # you may not", "when missing a necessary input file. :param str input_name: Name", "names are there to be submitted. raise IOError('num scripts dif.", ":param str executable: Executable file to use for the job", "# This is written to work with python 3 because", "a copy of the License at # # # #", "not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is", "'' with open(_script_name, 'w') as script_file: sfw = script_file.write sfw('#!/bin/bash", "in the text entry # Not sure if this works", "entry # Not sure if this works in 3.5+ because", "True). :param str make_xyz: The name of a file to", "_in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125', executable='g09', chk_file=None, copy_chk=False,", "xyz is None or make_xyz is not None: n_xyz =", "sfw('echo output was copied to $CURRENTDIR\\n\\n') if verbose: print('script written", "sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else:", "# Allows easier file input (with tab completion?) import subprocess", "out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {}", "else: if submit or input('submit job {}? '.format(scripts[0])) in yes:", "submission script for (Gaussian) jobs for submission to queue If", "# put 1,10,11,... as opposed to 1,...,9,10,... 
# if number", "file to use as input to use_gen_template (if make_input is", "help='Do not return the submitted job information') parser.add_argument('-k', '--chk_file', default=None,", "input file is {}'.format(file_name)) out_name = short_name + '.out' job_name", "action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more", "'$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not None: sfw('ln -s -b /net/`hostname", "hold_jid is not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is", "of Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of", "gets a script and all the # script names are", "referencing file system/file names import os import re import readline", "gautools.tools import ' 'use_gen_template as ugt;\\n' 'from thtools import load_obj,", "multiplicity) :param bool verbose: If True, some status messages will", "with open(_out_name, 'w') as out_file: with open(template, 'r') as templ_file:", "scripts (batch job)') parser.add_argument('-x', '--template', default=None, help='template file for creating", "will be used to create input for the Gaussian calculation.", "! -f {} ]; then\\n'.format( os.path.abspath(make_xyz)) + ' exit 17\\n'", "to create an xyz file to pass to use_gen_template. :param", "'__main__': description = 'Create and submit a script to run", "License. 
# # You may obtain a copy of the", "not include out_file, xyz, nproc, mem, or checkpoint because those", "all the # script names are there to be submitted.", "and copied back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the", "help='template file for creating input from coords') parser.add_argument('-s', '--submit', action='store_true',", "# else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose: print('created Gaussian input file", "'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module load openbabel/2.4.1\\n\\n') sfw('obabel {} -O", "process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('No jobs", "doesn't matter so much because it just won't # submit", "files created by mathematica have a comment # # as", "file to the scratch directory before running the job. `chk_file`", ":type mem: int or str :param mem: Minimum amount of", "to run') parser.add_argument('-b', '--batch', action='store_true', help='create multiple scripts (batch job)')", "directory. If chk_file is not None, it will also be", "else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is", "sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is not None: sfw('if [", "') if not copy_chk else None sfw('cp $CURRENTDIR/$CHECKFILE .\\n\\n') else:", "'use_gen_template as ugt;\\n' 'from thtools import load_obj, get_node_mem;\\n' 'm =", "True, print out some status messages and such :type mem:", "it will also be linked with the same base name.", "request :param str time: Amount of time to request in", "names import os import re import readline # Allows easier", "if input('Did you mean to execute a batch job? ')", "use this file except in compliance with the License. #", "line is the number of atoms # continue # #", "'.' 
in file_name: short_name, input_extension = os.path.splitext(file_name) if not short_name", "for (Gaussian) jobs for submission to queue If make_xyz is", "datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if xyz is None or make_xyz is", "-s`$SCRATCHDIR\\n') sfw('echo output was copied to $CURRENTDIR\\n\\n') if verbose: print('script", "is True). :param str make_xyz: The name of a file", "'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if", "by combining header and coordinates files This function takes as", "use?') _in_name_list = [rlinput('file name: ', base_name)] return _in_name_list, batch", "sure if this works in 3.5+ because raw_input is gone", "+ '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file))", "== file_name: raise SyntaxError('problem interpreting file name. ' + 'Period", "= short_name + '.com' print('Assuming input file is {}'.format(file_name)) out_name", "first two lines # number of atoms and the title/comment", "default=None, help='job on which this job should depend') args =", "as well. :param str ln_running: If not None, this will", "combining header and coordinates files This function takes as input", "'*') _in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len) # sort by", "str hold_jid: Job on which this job should depend. This", "= rel_dir + '/' else: rel_dir = '' f_name =", "in file name?') out_name = short_name + '.out' elif '.'", "\"License\"); # # you may not use this file except", "This should be the name of another job in the", "be input_name; xyz will be xyz or a time-based name", "passed to use_gen_template. :param bool copy_chk: If this is True,", "SyntaxError('problem interpreting file name. 
' + 'Period in file name?')", "thtools import load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format(", "copy what should be an existing checkpoint file to the", "str input_name: Name of the file to use as input", "hold_jid=args.hold_jid) script_list.append(script_name) if not len(script_list) == len(in_name_list): # This should", "or a time-based name if make_xyz is not None; nproc", "= re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name = 'default'", "print('opened {}'.format(template)) for line in templ_file: out_file.write(line) if '\\n' not", "f = _dir_and_file(script) with cd(rd, ignore_blank=True): cl = ['qsub', f]", "copy_chk: If this is True, the script will attempt to", "after the job has completed. If this is not None", "/usr/bin/env python3 ######################################################################## # # # This script was written", "If True, print out some status messages and such :type", "'.out' else: short_name = file_name file_name = short_name + '.com'", "and a template file that should be the header for", "1,...,9,10,... # if number 01,02,... They should all be the", "multiplicity), returns the name of the file, and creates a", "rd, f = _dir_and_file(script) with cd(rd, ignore_blank=True): cl = ['qsub',", "if args.template: in_name_list = use_template(args.template, in_name_list, args.verbose) script_list = []", "as the second line saying something like: # # \"Created", "another job in the queuing system. 
:param str xyz: Name", "# # <EMAIL> <EMAIL> # # # # Copyright 2015", "# sort by length (because otherwise would # put 1,10,11,...", "$CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo", "submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make program more verbose') parser.add_argument('-j',", "os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name)) if chk_file is", "verbose: print('script written to {}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False,", "this gets changed after job submission); mem will be mem;", "script will attempt to copy what should be an existing", "chk_line = '' with open(_script_name, 'w') as script_file: sfw =", "_script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs = [] if", "If True, use_gen_template will be used to create input for", "this time request # Maybe it doesn't matter so much", "# # limitations under the License. # # # ########################################################################", "parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the scratch directory') parser.add_argument('-l',", "input to use_gen_template (if make_input is True). :param str make_xyz:", "files alphanumerically _in_name_list.sort(key=len) # sort by length (because otherwise would", "name of the script file :rtype: str \"\"\" rel_dir, file_name", "<EMAIL> # # # # Copyright 2015 <NAME> IV #", "which this job should depend') args = parser.parse_args() in_name_list, args.batch", "jobs? ') in yes: for script in scripts: rd, f", "(batch job)') parser.add_argument('-x', '--template', default=None, help='template file for creating input", "'--numcores', type=int, default=16, help='Number of cores for job') # I", "which this job should depend. 
This should be the name", "bool make_input: If True, use_gen_template will be used to create", ":param mem: Minimum amount of memory to request :param str", "the same base name. :param str hold_jid: Job on which", "subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True) output = process.communicate()[0] if verbose: print(output) outputs.append(output)", "verbose: print(output) outputs.append(output) else: if verbose: print('{} not submitted'.format(scripts)) _job_info", "warn from thtools import cd, make_obj_dir, save_obj, resolve_path yes =", "os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz') if", "is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE $CURRENTDIR/.\\n')", "if make_xyz is not None: sfw('if [ ! -f {}", "process.communicate()[0] if verbose: print(output) outputs.append(output) else: if verbose: print('{} not", "`chk_file` must be not None as well. :param str ln_running:", "short_name + '.com' print('Assuming input file is {}'.format(file_name)) out_name =", "print(output) outputs.append(output) else: if verbose: print('{} not submitted'.format(scripts)) _job_info =", "input from coords') parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v',", "not None: sfw('cp $CHECKFILE $CURRENTDIR/.\\n\\n') else: sfw('\\n') sfw('echo ran in", "'--time', help='Time required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09',", "of the written file :rtype: str \"\"\" if verbose: print('Creating", "name shall I use?') _in_name_list = [rlinput('file name: ', base_name)]", "Example, 'g09', 'g16' :param str chk_file: If not None, this", "]; then\\n'.format( os.path.abspath(make_xyz)) + ' exit 17\\n' 'fi\\n\\n') sfw('module load", "execute a batch job? 
') in yes: batch = True", "< 2: # ignore first two lines # number of", "file_name: raise SyntaxError('problem interpreting file name. ' + 'Period in", "in_names: out_name = create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added", "temp_pkl) if chk_file is not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else:", "is deprecated. Use os.path.split instead', DeprecationWarning) if '/' in path:", "coordinates (the form should not matter, it will just be", "be the base name for linking the output file to", "= _dir_and_file(script) with cd(rd, ignore_blank=True): cl = ['qsub', f] #", "# Licensed under the Apache License, Version 2.0 (the \"License\");", "the queuing system. :param str xyz: Name of an xyz", "input_extension = os.path.splitext(file_name) if not short_name + '.' + input_extension", "will be input_name; xyz will be xyz or a time-based", "readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file", "<reponame>thompcinnamon/QM-calc-scripts<filename>gautools/submit_gaussian.py<gh_stars>0 #! /usr/bin/env python3 ######################################################################## # # # This script", "for linking output to cwd while ' 'running') parser.add_argument('-d', '--hold_jid',", "file') parser.add_argument('-c', '--numcores', type=int, default=16, help='Number of cores for job')", "'.xyz') if xyz is None or make_xyz is not None:", "be passed to use_gen_template. :param bool copy_chk: If this is", "you mean to execute a batch job? ') in yes:", "pass to use_gen_template. 
:param bool make_input: If True, use_gen_template will", "in file name?') out_name = short_name + '.out' else: short_name", "str :param mem: Minimum amount of memory to request :param", "sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file is not None:", "the # second sort won't do anything. if not batch:", "chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp $OUTPUTFILE", "open(coord_name, 'r') as in_file: if verbose: print('opened {}'.format(coord_name)) for i,", "necessary input file. :param str input_name: Name of the file", "\"\"\" Write submission script for (Gaussian) jobs for submission to", "of file with header for Gaussian calculation (up to and", "if verbose: print(output) outputs.append(output) else: if verbose: print('{} not submitted'.format(scripts))", "= '' f_name = path return rel_dir, f_name def create_gau_input(coord_name,", "attempt to copy what should be an existing checkpoint file", "linked with the same base name. 
:param str hold_jid: Job", "# ignore first two lines # number of atoms and", "create_gau_input(in_name, template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {} to files", "the same length and the # second sort won't do", "re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name = 'default' _script_name", "parsing commandline arguments import datetime import glob # Allows referencing", "file to use as input :param int num_cores: Number of", "######################################################################## # # # This script was written by <NAME>", "will be xyz or a time-based name if make_xyz is", "= [] for in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores,", "return _out_name def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name + '*')", "# # # # Unless required by applicable law or", "distributed on an \"AS IS\" BASIS, # # WITHOUT WARRANTIES", "License. # # # ######################################################################## # This is written to", "because it just won't # submit the job and it", "{}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c \"from gautools.tools import", "batch def use_template(template, in_names, verbose): made_name_list = [] for in_name", "True, some status messages will be printed (including file names)", "to use as input to use_gen_template (if make_input is True).", "is not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = ''", "for the job Example, 'g09', 'g16' :param str chk_file: If", "print('Assuming input file is {}'.format(file_name)) out_name = short_name + '.out'", "to make sure everything input gets a script and all", "and the # second sort won't do anything. 
if not", "check validity of this time request # Maybe it doesn't", "temp_xyz[:-4] if ugt_dict is not None: make_obj_dir() pkl_path = save_obj(ugt_dict,", "not None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else:", "Write submission script for (Gaussian) jobs for submission to queue", "for script in scripts: rd, f = _dir_and_file(script) with cd(rd,", "request in the format 'hh:mm:ss' :param bool verbose: If True,", "running the job. `chk_file` must be not None as well.", "(the form should not matter, it will just be copied", "I should probably check validity of this time request #", "will also be passed to use_gen_template. :param bool copy_chk: If", "job submission); mem will be mem; and checkpoint will be", "else: sfw('\\n') sfw('echo ran in /net/`hostname -s`$SCRATCHDIR\\n') sfw('echo output was", "None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd", "should all be the same length and the # second", "make_xyz will be checked to exist first to make sure", "if verbose: print('Creating Gaussian input file...') _out_name = coord_name.rsplit('.', 1)[0]", "job Example, 'g09', 'g16' :param str chk_file: If not None,", "to and including the charge and multiplicity) :param bool verbose:", "of the file, and creates a Gaussian input file ending", "and multiplicity) :param bool verbose: If True, some status messages", "will be the base name for linking the output file", "script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running,", "feedback about that? 
parser.add_argument('-t', '--time', help='Time required as \"hh:mm:ss\"', default='12:00:00')", "back') parser.add_argument('--copy_chk', action='store_true', help='Copy check file to the scratch directory')", "but scripts created') else: if submit or input('submit job {}?", "is True, the script will attempt to copy what should", "for the desired calculation (including charge and multiplicity), returns the", "'m = get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name,", "sort files alphanumerically _in_name_list.sort(key=len) # sort by length (because otherwise", "$NSLOTS (useful if this gets changed after job submission); mem", "input_extension == file_name: raise SyntaxError('problem interpreting file name. ' +", "num_cores=args.numcores, time=args.time, verbose=args.verbose, executable=args.executable, chk_file=args.chk_file, copy_chk=args.copy_chk, ln_running=args.ln_running, hold_jid=args.hold_jid) script_list.append(script_name) if", "give quick feedback about that? parser.add_argument('-t', '--time', help='Time required as", "in path: rel_dir, f_name = path.rsplit('/', 1) rel_dir = rel_dir", "open(template, 'r') as templ_file: if verbose: print('opened {}'.format(template)) for line", "rel_dir = rel_dir + '/' else: rel_dir = '' f_name", "either express or # # implied. # # See the", "+ 'Period in file name?') out_name = short_name + '.out'", "if verbose: print('No jobs submitted, but scripts created') else: if", "a comment # # as the second line saying something", "None as well. :param str ln_running: If not None, this", "def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs = [] if batch:", "_in_name_list = [rlinput('file name: ', base_name)] return _in_name_list, batch def", "printed (including file names) :return: name of the written file", "will be copied back after the job has completed. 
If", "__name__ == '__main__': description = 'Create and submit a script", "won't # submit the job and it will give quick", "' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on which this job", "xyz will be xyz or a time-based name if make_xyz", "_job_info = [' '.join(output.split(' ')[2:4]) for output in outputs] return", "return input(prompt) finally: readline.set_startup_hook() def _dir_and_file(path): warn('_dir_and_file is deprecated. Use", "not in line: out_file.write('\\n') with open(coord_name, 'r') as in_file: if", "also be linked with the same base name. :param str", "sfw('python -c \"from gautools.tools import ' 'use_gen_template as ugt;\\n' 'from", "if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n') sfw('cp", "= get_node_mem();\\n' 'd = load_obj(\\'{}\\');\\n'.format( os.path.abspath(pkl_path)) + 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz))", "for the specific language governing permissions and # # limitations", "argparse # For parsing commandline arguments import datetime import glob", "from thtools import cd, make_obj_dir, save_obj, resolve_path yes = ['y',", "'.com' == file_name: raise SyntaxError('problem interpreting file name. ' +", "script to run a Gaussian job on SCC' parser =", "sfw('SCRATCHDIR=/scratch/$USER\\n') sfw('mkdir -p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if", "str time: Amount of time to request in the format", "Gaussian calculation. 
:param dict ugt_dict: dict of arguments to pass", "messages will be printed (including file names) :return: name of", "use for the job Example, 'g09', 'g16' :param str chk_file:", "import readline # Allows easier file input (with tab completion?)", "and make_input is True, this will also be passed to", "verbose: If True, print out some status messages and such", "a Gaussian input file ending with '.com' :param str coord_name:", "+ '/' else: rel_dir = '' f_name = path return", "on which this job should depend. This should be the", "os.path.abspath(make_xyz)) + ' exit 17\\n' 'fi\\n\\n') sfw('module load wxwidgets/3.0.2\\n') sfw('module", "verbose: print('No jobs submitted, but scripts created') else: if submit", "raw_input is gone def rlinput(prompt, prefill=''): readline.set_startup_hook(lambda: readline.insert_text(prefill)) try: return", "'1'] # An input function that can prefill in the", "a necessary input file. :param str input_name: Name of the", "messages and such :type mem: int or str :param mem:", "+ 'ugt(\\'{}\\',\\'{}\\','.format( file_name, os.path.abspath(n_xyz)) + 'nproc=$NSLOTS,mem=m,{}'.format(chk_line) + '**d)\"\\n\\n') sfw('INPUTFILE={}\\n'.format(file_name)) sfw('OUTPUTFILE={}\\n'.format(out_name))", "of this time request # Maybe it doesn't matter so", "help='Time required as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name", "parser.add_argument('-d', '--hold_jid', default=None, help='job on which this job should depend')", "make_xyz is not None: sfw('if [ ! 
-f {} ];", "returns the name of the file, and creates a Gaussian", "ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None): \"\"\" Write submission script", "be an existing checkpoint file to the scratch directory before", "not None: sfw('rm $CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw('", "= short_name + '.out' else: short_name = file_name file_name =", "'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f')", "$CURRENTDIR/$WORKINGOUT') if chk_file is not None: sfw(' $CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n\\n')", "== '__main__': description = 'Create and submit a script to", "out some status messages and such :type mem: int or", "mem='125', executable='g09', chk_file=None, copy_chk=False, ln_running=None, hold_jid=None, xyz=None, make_xyz=None, make_input=False, ugt_dict=None):", "linking the output file to the current directory. If chk_file", "a set of molecular coordinates (the form should not matter,", "$SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file is not", "input (with tab completion?) import subprocess # Allows for submitting", "length (because otherwise would # put 1,10,11,... as opposed to", "resolve_path yes = ['y', 'yes', '1'] # An input function", "be copied back after the job has completed. If this", "BASIS, # # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "and including the charge and multiplicity) :param bool verbose: If", "{} -O {}\\n\\n'.format(os.path.abspath( make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c \"from", "or # # implied. 
# # See the License for", "in_name_list = use_template(args.template, in_name_list, args.verbose) script_list = [] for in_name", "out_file.write(line) if '\\n' not in line: out_file.write('\\n') with open(coord_name, 'r')", "text entry # Not sure if this works in 3.5+", "verbose: print('Added {} to files to possibly submit.'.format(out_name)) _in_name_list =", "input('submit all jobs? ') in yes: for script in scripts:", "# line.strip().startswith('generated'): # continue # else: out_file.write(line) out_file.write('\\n\\n\\n') if verbose:", "is not None: sfw('CHECKFILE={}\\n\\n'.format(chk_file)) else: sfw('\\n') if ln_running is not", "else: rel_dir = '' f_name = path return rel_dir, f_name", "This should never be the case as far as I", "'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = '' with open(_script_name, 'w') as script_file:", "not batch: num_files = len(_in_name_list) if num_files > 1: print('Multiple", "submit the job and it will give quick feedback about", "import warn from thtools import cd, make_obj_dir, save_obj, resolve_path yes", ":param str input_name: Name of the file to use as", "output file to the current directory. If chk_file is not", "# # See the License for the specific language governing", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or #", "glob.glob(base_name + '*') _in_name_list.sort() # sort files alphanumerically _in_name_list.sort(key=len) #", "# Allows for submitting commands to the shell from warnings", "Gaussian job on SCC' parser = argparse.ArgumentParser(description=description) parser.add_argument('in_name', help='Name of", "the current directory. If chk_file is not None, it will", "should not matter, it will just be copied into the", "def _dir_and_file(path): warn('_dir_and_file is deprecated. 
Use os.path.split instead', DeprecationWarning) if", "sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file is not None: sfw('# ')", "sfw('\\n') sfw('echo About to run {} in /net/`'.format(executable) + 'hostname", "request # Maybe it doesn't matter so much because it", "parser.add_argument('-s', '--submit', action='store_true', help='Automatically submit jobs?') parser.add_argument('-v', '--verbose', action='store_true', help='make", "xyz or a time-based name if make_xyz is not None;", "or agreed to in writing, software # # distributed under", "status messages will be printed (including file names) :return: name", "a file with a set of molecular coordinates (the form", "for job') # I should probably check validity of this", "name?') out_name = short_name + '.out' else: short_name = file_name", "[ ! -f {} ]; then\\n'.format( os.path.abspath(make_xyz)) + ' exit", "and coordinates files This function takes as input a file", "For parsing commandline arguments import datetime import glob # Allows", "Version 2.0 (the \"License\"); # # you may not use", "use_gen_template will be used to create input for the Gaussian", "to copy what should be an existing checkpoint file to", "if chk_file is not None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line", "to cwd while ' 'running') parser.add_argument('-d', '--hold_jid', default=None, help='job on", "# Not sure if this works in 3.5+ because raw_input", "sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is", "by applicable law or agreed to in writing, software #", "(the \"License\"); # # you may not use this file", "short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) ==", "parser.add_argument('in_name', help='Name of Gaussian input file') parser.add_argument('-c', '--numcores', type=int, default=16,", "an \"AS IS\" BASIS, # # WITHOUT WARRANTIES OR CONDITIONS", ":param bool copy_chk: If this is True, the script will", 
"script_file: sfw = script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores))", "put 1,10,11,... as opposed to 1,...,9,10,... # if number 01,02,...", "[] if batch: if submit or input('submit all jobs? ')", "None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file is not", "make sure to not waste time when missing a necessary", "_in_name_list.sort() _in_name_list.sort(key=len) return _in_name_list def write_sub_script(input_name, num_cores=16, time='12:00:00', verbose=False, mem='125',", "the Gaussian calculation. :param dict ugt_dict: dict of arguments to", "out_name = short_name + '.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if", "not submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4]) for output in", "= submit_scripts(script_list, args.batch, args.submit, args.verbose) if job_info and args.nojobinfo: for", "IOError('num scripts dif. from num names given') job_info = submit_scripts(script_list,", "checkpoint will be chk_file. :return: The name of the script", "-l h_rt={}\\n'.format(time)) sfw('#$ -l mem_total={}G\\n'.format(mem)) sfw('#$ -N {}\\n'.format(job_name)) sfw('#$ -j", "are there to be submitted. raise IOError('num scripts dif. 
from", "> 1: print('Multiple files starting with {}'.format(base_name)) if input('Did you", "rel_dir, f_name = path.rsplit('/', 1) rel_dir = rel_dir + '/'", "file that should be the header for the desired calculation", "temp_pkl = temp_xyz[:-4] if ugt_dict is not None: make_obj_dir() pkl_path", "script in scripts: rd, f = _dir_and_file(script) with cd(rd, ignore_blank=True):", "# I should probably check validity of this time request", "# <EMAIL> <EMAIL> # # # # Copyright 2015 <NAME>", "of cores for job') # I should probably check validity", "the text entry # Not sure if this works in", "if chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n') sfw('CURRENTDIR=`pwd`\\n') sfw('SCRATCHDIR=/scratch/$USER\\n')", "is not None, the file make_xyz will be checked to", "to {}'.format(_script_name)) return _script_name def submit_scripts(scripts, batch=False, submit=False, verbose=False): outputs", "sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$ -M <EMAIL>\\n') sfw('#$ -m eas\\n')", "print('{} not submitted'.format(scripts)) _job_info = [' '.join(output.split(' ')[2:4]) for output", "some status messages will be printed (including file names) :return:", "'--batch', action='store_true', help='create multiple scripts (batch job)') parser.add_argument('-x', '--template', default=None,", "'.out' job_name = re.match(r'.*?([a-zA-Z].*)', short_name).group(1) if len(job_name) == 0: job_name", "' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file is not None: sfw('ln -s -b", "batch job? 
') in yes: batch = True else: print('What", "resolve_path(xyz) temp_pkl = temp_xyz[:-4] if ugt_dict is not None: make_obj_dir()", "description = 'Create and submit a script to run a", "default=None, help='template file for creating input from coords') parser.add_argument('-s', '--submit',", "None: chk_line = 'checkpoint=\\'{}\\','.format(chk_file) else: chk_line = '' with open(_script_name,", "cd(rd, ignore_blank=True): cl = ['qsub', f] # Don't really know", "file will be copied back after the job has completed.", "cl = ['qsub', f] # Don't really know how this", "-p $SCRATCHDIR\\n\\n') sfw('cd $SCRATCHDIR\\n\\n') sfw('cp $CURRENTDIR/$INPUTFILE .\\n') if chk_file is", "with '.com' :param str coord_name: name of file with coordinates", "this function. out_file will be input_name; xyz will be xyz", "action='store_false', help='Do not return the submitted job information') parser.add_argument('-k', '--chk_file',", "Maybe it doesn't matter so much because it just won't", "header for Gaussian calculation (up to and including the charge", "is not None: make_obj_dir() pkl_path = save_obj(ugt_dict, temp_pkl) if chk_file", "+ input_extension == file_name: raise SyntaxError('problem interpreting file name. 
'", "print('opened {}'.format(coord_name)) for i, line in enumerate(in_file): if i <", "file {}'.format(_out_name)) return _out_name def get_input_files(base_name, batch): _in_name_list = glob.glob(base_name", "the script will attempt to copy what should be an", "# if line.strip().isdigit(): # # the first line is the", ":param bool make_input: If True, use_gen_template will be used to", "with open(coord_name, 'r') as in_file: if verbose: print('opened {}'.format(coord_name)) for", "executable: Executable file to use for the job Example, 'g09',", "this file will be copied back after the job has", "ugt;\\n' 'from thtools import load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n' 'd", "parser.add_argument('-l', '--ln_running', type=str, default=None, help='base name for linking output to", "template, verbose=verbose) made_name_list.append(out_name) if verbose: print('Added {} to files to", "# You may obtain a copy of the License at", "print(output) outputs.append(output) else: if verbose: print('No jobs submitted, but scripts", "# # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "This function takes as input a file with a set", "file name. ' + 'Period in file name?') out_name =", "+ '.' + input_extension == file_name: raise SyntaxError('problem interpreting file", "sfw = script_file.write sfw('#!/bin/bash -l\\n\\n') sfw('#$ -pe omp {}\\n'.format(num_cores)) sfw('#$", "None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n') if chk_file", "# continue # # XYZ files created by mathematica have", "be used to create input for the Gaussian calculation. :param", "input file {}'.format(_out_name)) return _out_name def get_input_files(base_name, batch): _in_name_list =", "else: sfw('\\n') sfw('echo About to run {} in /net/`'.format(executable) +", "= 'default' _script_name = os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' +", "not None as well. 
:param str ln_running: If not None,", "not None: sfw('#$ -hold_jid {}\\n\\n'.format(hold_jid)) if make_xyz is not None:", "calculation (up to and including the charge and multiplicity) :param", "outputs.append(output) else: if verbose: print('{} not submitted'.format(scripts)) _job_info = ['", "should depend') args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name, args.batch)", "be submitted. raise IOError('num scripts dif. from num names given')", "len(in_name_list): # This should never be the case as far", "script names are there to be submitted. raise IOError('num scripts", "-N {}\\n'.format(job_name)) sfw('#$ -j y\\n') sfw('#$ -o {}.log\\n\\n'.format(short_name)) if hold_jid", "name of the file, and creates a Gaussian input file", "request :param str executable: Executable file to use for the", "submission); mem will be mem; and checkpoint will be chk_file.", "version of python. from __future__ import print_function import argparse #", "to use_gen_template. :param bool make_input: If True, use_gen_template will be", "memory to request :param str executable: Executable file to use", "job should depend') args = parser.parse_args() in_name_list, args.batch = get_input_files(args.in_name,", "IV # # # # Licensed under the Apache License,", "to files to possibly submit.'.format(out_name)) _in_name_list = made_name_list _in_name_list.sort() _in_name_list.sort(key=len)", "in yes: batch = True else: print('What file name shall", "it doesn't matter so much because it just won't #", "return the submitted job information') parser.add_argument('-k', '--chk_file', default=None, help='checkpoint file", "_in_name_list.sort(key=len) # sort by length (because otherwise would # put", "input a file with a set of molecular coordinates (the", "# if line.strip().startswith('Create') or # line.strip().startswith('generated'): # continue # else:", "under the License. 
# # # ######################################################################## # This is", "from # http://stackoverflow.com/questions/4256107/ # running-bash-commands-in-python process = subprocess.Popen(cl, stdout=subprocess.PIPE, universal_newlines=True)", "# # # # Licensed under the Apache License, Version", "-f {} ]; then\\n'.format( os.path.abspath(make_xyz)) + ' exit 17\\n' 'fi\\n\\n')", "coord_name.rsplit('.', 1)[0] + '.com' with open(_out_name, 'w') as out_file: with", "for in_name in in_name_list: script_name = write_sub_script(input_name=in_name, num_cores=args.numcores, time=args.time, verbose=args.verbose,", "as ugt;\\n' 'from thtools import load_obj, get_node_mem;\\n' 'm = get_node_mem();\\n'", "same length and the # second sort won't do anything.", "to run a Gaussian job on SCC' parser = argparse.ArgumentParser(description=description)", "warn('_dir_and_file is deprecated. Use os.path.split instead', DeprecationWarning) if '/' in", "input_name; xyz will be xyz or a time-based name if", "+ '.xyz') if xyz is None or make_xyz is not", "None: sfw('WORKINGOUT={}.out\\n'.format(ln_running)) if chk_file is not None: sfw('WORKINGCHK={}.chk\\n\\n'.format(ln_running)) else: sfw('\\n')", "1,10,11,... as opposed to 1,...,9,10,... # if number 01,02,... 
They", "= os.path.join(rel_dir, 'submit'+short_name+'.sh') temp_xyz = os.path.abspath('.temp' + datetime.datetime.now().strftime('%H%M%S%f') + '.xyz')", "it should be good to # be working on the", "if xyz is None or make_xyz is not None: n_xyz", "help='checkpoint file to be written and copied back') parser.add_argument('--copy_chk', action='store_true',", "args.batch, args.submit, args.verbose) if job_info and args.nojobinfo: for job in", "make_xyz), os.path.abspath(n_xyz))) if make_input: sfw('python -c \"from gautools.tools import '", ":return: The name of the script file :rtype: str \"\"\"", "is not None: sfw('ln -s -b /net/`hostname -s`$PWD/$OUTPUTFILE ' '$CURRENTDIR/$WORKINGOUT\\n')", "str executable: Executable file to use for the job Example,", "is the number of atoms # continue # # XYZ", "num_files > 1: print('Multiple files starting with {}'.format(base_name)) if input('Did", "-s -b /net/`hostname -s`$PWD/$CHECKFILE ' '$CURRENTDIR/$WORKINGCHK\\n\\n') else: sfw('\\n') sfw('echo About", "verbose): made_name_list = [] for in_name in in_names: out_name =", "Executable file to use for the job Example, 'g09', 'g16'", "'--template', default=None, help='template file for creating input from coords') parser.add_argument('-s',", "be the case as far as I know, but I", "and checkpoint will be chk_file. :return: The name of the", "file_name: short_name, input_extension = os.path.splitext(file_name) if not short_name + '.'", "= save_obj(ugt_dict, temp_pkl) if chk_file is not None: chk_line =", "the next file) and a template file that should be", "print('Added {} to files to possibly submit.'.format(out_name)) _in_name_list = made_name_list", "else: chk_line = '' with open(_script_name, 'w') as script_file: sfw", "as \"hh:mm:ss\"', default='12:00:00') parser.add_argument('-e', '--executable', type=str, default='g09', help='name of executable", "in the queuing system. 
:param str xyz: Name of an", "verbose: print('opened {}'.format(coord_name)) for i, line in enumerate(in_file): if i", "= temp_xyz else: n_xyz = resolve_path(xyz) temp_pkl = temp_xyz[:-4] if", "'g16' :param str chk_file: If not None, this file will" ]
[ "= np.array([tt[1] for tt in t]).astype(np.float32) rwd = np.array([tt[2] for", "[] return def add(self, o, a, r, d): # self._cur_traj.append((o,", "# pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1], 'rewards':", "done[:-1] }) with open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj, outfile) return", "in t]) # pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:], 'actions':", "# self._cur_traj.append((o, a, r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d))", "}) with open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj, outfile) return def", "t]) # pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1],", "import numpy as np import pickle def listify_mat(matrix): matrix =", "'next_observations': obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1] }) with", "= np.array(matrix).astype(str) if len(matrix.shape) > 1: matrix_list = [] for", "if len(matrix.shape) > 1: matrix_list = [] for row in", "traj = {'traj': []} for t in self._traj: traj['traj'].append(t) #", "= {'traj': []} for t in self._traj: traj['traj'].append(t) # json.dumps(traj,", "return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return def add(self,", "self._traj: obs = np.array([tt[0] for tt in t]).astype(np.float32) act =", "obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1] }) with open('{}.pkl'.format(filename),", "'actions': act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1] }) with open('{}.pkl'.format(filename), 'wb')", "d): # self._cur_traj.append((o, a, r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r),", "for tt in t]) # pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations':", "as np import pickle def listify_mat(matrix): matrix = 
np.array(matrix).astype(str) if", "for tt in t]).astype(np.float32) act = np.array([tt[1] for tt in", "np.array([tt[0] for tt in t]).astype(np.float32) act = np.array([tt[1] for tt", "(listify_mat(o), listify_mat(a), listify_mat(r), d)) return def export_pickle(self, filename='traj'): if filename", "np.array(matrix).astype(str) if len(matrix.shape) > 1: matrix_list = [] for row", "filename == '': raise ValueError('incorrect file name') traj = {'traj':", "[]} for t in self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4)", "o, a, r, d): # self._cur_traj.append((o, a, r, d)) self._cur_traj.append(", "file name') traj = [] for t in self._traj: obs", "self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d)) return def export_pickle(self, filename='traj'): if", "def export(self, filename='traj'): if filename == '': raise ValueError('incorrect file", "except: pdb.set_trace() return matrix_list else: return list(matrix) class Recorder(): def", "traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w') as", "'observations': obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1]", "'terminals': done[:-1] }) with open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj, outfile)", "act = np.array([tt[1] for tt in t]).astype(np.float32) rwd = np.array([tt[2]", "= np.array([tt[3] for tt in t]) # pdb.set_trace() traj.append({ 'observations':", "export_pickle(self, filename='traj'): if filename == '': raise ValueError('incorrect file name')", "matrix_list = [] for row in matrix: try: matrix_list.append(list(row)) except:", "[] for row in matrix: try: matrix_list.append(list(row)) except: pdb.set_trace() return", "t]).astype(np.float32) act = np.array([tt[1] for tt in t]).astype(np.float32) rwd =", "= [] for row in matrix: try: matrix_list.append(list(row)) except: pdb.set_trace()", "filename == '': raise 
ValueError('incorrect file name') traj = []", "def add(self, o, a, r, d): # self._cur_traj.append((o, a, r,", "self._traj, self._cur_traj = [], [] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj", "r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d)) return def export_pickle(self,", "sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w') as outfile: json.dump(traj, outfile)", "json import copy import pdb import numpy as np import", "'': raise ValueError('incorrect file name') traj = [] for t", "copy import pdb import numpy as np import pickle def", "row in matrix: try: matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list else:", "t]).astype(np.float32) rwd = np.array([tt[2] for tt in t]).astype(np.float32) done =", "done = np.array([tt[3] for tt in t]) # pdb.set_trace() traj.append({", "import pickle def listify_mat(matrix): matrix = np.array(matrix).astype(str) if len(matrix.shape) >", "= [] return def add(self, o, a, r, d): #", "return list(matrix) class Recorder(): def __init__(self): self._traj, self._cur_traj = [],", "self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w')", "= np.array([tt[2] for tt in t]).astype(np.float32) done = np.array([tt[3] for", "{'traj': []} for t in self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True,", "pdb import numpy as np import pickle def listify_mat(matrix): matrix", "= [] for t in self._traj: obs = np.array([tt[0] for", "with open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj, outfile) return def export(self,", "= np.array([tt[0] for tt in t]).astype(np.float32) act = np.array([tt[1] for", "open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj, outfile) return def export(self, filename='traj'):", "numpy as np import pickle def listify_mat(matrix): matrix = 
np.array(matrix).astype(str)", "listify_mat(a), listify_mat(r), d)) return def export_pickle(self, filename='traj'): if filename ==", "Recorder(): def __init__(self): self._traj, self._cur_traj = [], [] return def", "traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1], 'terminals':", "raise ValueError('incorrect file name') traj = [] for t in", "listify_mat(r), d)) return def export_pickle(self, filename='traj'): if filename == '':", "= [], [] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = []", "for tt in t]).astype(np.float32) done = np.array([tt[3] for tt in", "'rewards': rwd[:-1], 'terminals': done[:-1] }) with open('{}.pkl'.format(filename), 'wb') as outfile:", "tt in t]) # pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:],", "class Recorder(): def __init__(self): self._traj, self._cur_traj = [], [] return", "listify_mat(matrix): matrix = np.array(matrix).astype(str) if len(matrix.shape) > 1: matrix_list =", "d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d)) return def export_pickle(self, filename='traj'):", "for row in matrix: try: matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list", "matrix_list else: return list(matrix) class Recorder(): def __init__(self): self._traj, self._cur_traj", "t in self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with", "matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list else: return list(matrix) class Recorder():", "pdb.set_trace() return matrix_list else: return list(matrix) class Recorder(): def __init__(self):", "return def export_pickle(self, filename='traj'): if filename == '': raise ValueError('incorrect", "name') traj = [] for t in self._traj: obs =", "np.array([tt[3] for tt in t]) # pdb.set_trace() traj.append({ 'observations': obs[:-1],", "return def export(self, 
filename='traj'): if filename == '': raise ValueError('incorrect", "self._cur_traj = [] return def add(self, o, a, r, d):", "tt in t]).astype(np.float32) act = np.array([tt[1] for tt in t]).astype(np.float32)", "[] for t in self._traj: obs = np.array([tt[0] for tt", "== '': raise ValueError('incorrect file name') traj = {'traj': []}", "np.array([tt[1] for tt in t]).astype(np.float32) rwd = np.array([tt[2] for tt", "return def add(self, o, a, r, d): # self._cur_traj.append((o, a,", "t in self._traj: obs = np.array([tt[0] for tt in t]).astype(np.float32)", "== '': raise ValueError('incorrect file name') traj = [] for", "tt in t]).astype(np.float32) done = np.array([tt[3] for tt in t])", "act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1] }) with open('{}.pkl'.format(filename), 'wb') as", "for t in self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace()", "t]).astype(np.float32) done = np.array([tt[3] for tt in t]) # pdb.set_trace()", "in t]).astype(np.float32) rwd = np.array([tt[2] for tt in t]).astype(np.float32) done", "def listify_mat(matrix): matrix = np.array(matrix).astype(str) if len(matrix.shape) > 1: matrix_list", "rwd = np.array([tt[2] for tt in t]).astype(np.float32) done = np.array([tt[3]", "pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return def add(self, o, a,", "r, d): # self._cur_traj.append((o, a, r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a),", "ValueError('incorrect file name') traj = {'traj': []} for t in", "matrix = np.array(matrix).astype(str) if len(matrix.shape) > 1: matrix_list = []", "import pdb import numpy as np import pickle def listify_mat(matrix):", "if filename == '': raise ValueError('incorrect file name') traj =", "name') traj = {'traj': []} for t in self._traj: traj['traj'].append(t)", "self._cur_traj.append((o, a, r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d)) return", "export(self, 
filename='traj'): if filename == '': raise ValueError('incorrect file name')", "rwd[:-1], 'terminals': done[:-1] }) with open('{}.pkl'.format(filename), 'wb') as outfile: pickle.dump(traj,", "return matrix_list else: return list(matrix) class Recorder(): def __init__(self): self._traj,", "import json import copy import pdb import numpy as np", "> 1: matrix_list = [] for row in matrix: try:", "outfile: pickle.dump(traj, outfile) return def export(self, filename='traj'): if filename ==", "indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w') as outfile: json.dump(traj, outfile) return", "list(matrix) class Recorder(): def __init__(self): self._traj, self._cur_traj = [], []", "pickle.dump(traj, outfile) return def export(self, filename='traj'): if filename == '':", "in t]).astype(np.float32) done = np.array([tt[3] for tt in t]) #", "filename='traj'): if filename == '': raise ValueError('incorrect file name') traj", "for tt in t]).astype(np.float32) rwd = np.array([tt[2] for tt in", "self._cur_traj = [], [] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj =", "import copy import pdb import numpy as np import pickle", "pickle def listify_mat(matrix): matrix = np.array(matrix).astype(str) if len(matrix.shape) > 1:", "a, r, d)) self._cur_traj.append( (listify_mat(o), listify_mat(a), listify_mat(r), d)) return def", "def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return def add(self, o,", "add(self, o, a, r, d): # self._cur_traj.append((o, a, r, d))", "'wb') as outfile: pickle.dump(traj, outfile) return def export(self, filename='traj'): if", "in matrix: try: matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list else: return", "np import pickle def listify_mat(matrix): matrix = np.array(matrix).astype(str) if len(matrix.shape)", "else: return list(matrix) class Recorder(): def __init__(self): self._traj, self._cur_traj =", "np.array([tt[2] for tt in 
t]).astype(np.float32) done = np.array([tt[3] for tt", "def __init__(self): self._traj, self._cur_traj = [], [] return def pack_traj(self):", "def export_pickle(self, filename='traj'): if filename == '': raise ValueError('incorrect file", "obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1], 'terminals': done[:-1] })", "len(matrix.shape) > 1: matrix_list = [] for row in matrix:", "tt in t]).astype(np.float32) rwd = np.array([tt[2] for tt in t]).astype(np.float32)", "outfile) return def export(self, filename='traj'): if filename == '': raise", "# json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w') as outfile:", "for t in self._traj: obs = np.array([tt[0] for tt in", "json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename), 'w') as outfile: json.dump(traj,", "a, r, d): # self._cur_traj.append((o, a, r, d)) self._cur_traj.append( (listify_mat(o),", "[], [] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return", "[] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return def", "traj = [] for t in self._traj: obs = np.array([tt[0]", "pdb.set_trace() traj.append({ 'observations': obs[:-1], 'next_observations': obs[1:], 'actions': act[:-1], 'rewards': rwd[:-1],", "as outfile: pickle.dump(traj, outfile) return def export(self, filename='traj'): if filename", "file name') traj = {'traj': []} for t in self._traj:", "self._traj.append(copy.deepcopy(self._cur_traj)) self._cur_traj = [] return def add(self, o, a, r,", "1: matrix_list = [] for row in matrix: try: matrix_list.append(list(row))", "'': raise ValueError('incorrect file name') traj = {'traj': []} for", "raise ValueError('incorrect file name') traj = {'traj': []} for t", "d)) return def export_pickle(self, filename='traj'): if filename == '': raise", "in self._traj: obs = np.array([tt[0] for tt in 
t]).astype(np.float32) act", "ValueError('incorrect file name') traj = [] for t in self._traj:", "obs = np.array([tt[0] for tt in t]).astype(np.float32) act = np.array([tt[1]", "try: matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list else: return list(matrix) class", "matrix: try: matrix_list.append(list(row)) except: pdb.set_trace() return matrix_list else: return list(matrix)", "in self._traj: traj['traj'].append(t) # json.dumps(traj, sort_keys=True, indent=4) pdb.set_trace() with open('{}.json'.format(filename),", "__init__(self): self._traj, self._cur_traj = [], [] return def pack_traj(self): self._traj.append(copy.deepcopy(self._cur_traj))", "in t]).astype(np.float32) act = np.array([tt[1] for tt in t]).astype(np.float32) rwd" ]
[ "mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(), fake.longitude(),", "'age', 'street', 'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for", "fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(), fake.longitude(), fake.latitude() ])", "fake = Faker() header = ['name', 'age', 'street', 'city', 'state',", "Faker() header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng',", "max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(), fake.longitude(), fake.latitude() ]) output.close()", "fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(), fake.longitude(), fake.latitude()", "output = open('data.CSV', 'w', newline='') fake = Faker() header =", "'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000):", "# Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='') fake =", "https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='') fake = Faker() header", "'w', newline='') fake = Faker() header = ['name', 'age', 'street',", "open('data.CSV', 'w', newline='') fake = Faker() header = ['name', 'age',", "'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in", "= open('data.CSV', 'w', newline='') fake = Faker() header = ['name',", "mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80,", "= ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output)", "= Faker() header = ['name', 'age', 'street', 'city', 'state', 'zip',", "'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000): 
mywriter.writerow([", "Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='') fake = Faker()", "for r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(),", "import Faker import csv # Reference: https://pypi.org/project/Faker/ output = open('data.CSV',", "header = ['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat']", "faker import Faker import csv # Reference: https://pypi.org/project/Faker/ output =", "in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(),", "Faker import csv # Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w',", "newline='') fake = Faker() header = ['name', 'age', 'street', 'city',", "['name', 'age', 'street', 'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header)", "range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(), fake.state(), fake.zipcode(),", "'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000): mywriter.writerow([ fake.name(),", "'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18,", "mywriter.writerow(header) for r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1),", "r in range(1000): mywriter.writerow([ fake.name(), fake.random_int(min=18, max=80, step=1), fake.street_address(), fake.city(),", "from faker import Faker import csv # Reference: https://pypi.org/project/Faker/ output", "import csv # Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='')", "csv # Reference: https://pypi.org/project/Faker/ output = open('data.CSV', 'w', newline='') fake", "<reponame>ifekxp/data from faker import Faker import csv # Reference: 
https://pypi.org/project/Faker/", "'street', 'city', 'state', 'zip', 'lng', 'lat'] mywriter=csv.writer(output) mywriter.writerow(header) for r" ]
[ "from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies", "# Generated by Django 2.2.4 on 2019-10-03 21:09 from django.db", "on 2019-10-03 21:09 from django.db import migrations, models import django.db.models.deletion", "by Django 2.2.4 on 2019-10-03 21:09 from django.db import migrations,", "2.2.4 on 2019-10-03 21:09 from django.db import migrations, models import", "2019-10-03 21:09 from django.db import migrations, models import django.db.models.deletion class", "= [ ('ingreso', '0003_auto_20190907_2152'), ] operations = [ migrations.AlterField( model_name='detalle_ingreso',", "import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ingreso', '0003_auto_20190907_2152'), ]", "('ingreso', '0003_auto_20190907_2152'), ] operations = [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod', field=models.ForeignKey(null=True,", "Migration(migrations.Migration): dependencies = [ ('ingreso', '0003_auto_20190907_2152'), ] operations = [", "[ ('ingreso', '0003_auto_20190907_2152'), ] operations = [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod',", "migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ingreso',", "operations = [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto'), ),", "= [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto'), ), ]", "models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ('ingreso', '0003_auto_20190907_2152'),", "django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies =", "django.db.models.deletion 
class Migration(migrations.Migration): dependencies = [ ('ingreso', '0003_auto_20190907_2152'), ] operations", "class Migration(migrations.Migration): dependencies = [ ('ingreso', '0003_auto_20190907_2152'), ] operations =", "'0003_auto_20190907_2152'), ] operations = [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE,", "import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [", "dependencies = [ ('ingreso', '0003_auto_20190907_2152'), ] operations = [ migrations.AlterField(", "21:09 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration):", "] operations = [ migrations.AlterField( model_name='detalle_ingreso', name='id_prod', field=models.ForeignKey(null=True, on_delete=django.db.models.deletion.CASCADE, to='producto.Producto'),", "Django 2.2.4 on 2019-10-03 21:09 from django.db import migrations, models", "Generated by Django 2.2.4 on 2019-10-03 21:09 from django.db import" ]
[ "libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu", "def test_tddft_iter(self): \"\"\" This is iterative TDDFT with SIESTA starting", "valence + H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19)", "td = tddft_iter(label='water', cd=dname) try: from pyscf.lib import misc libnao_gpu", "None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O --", "td_gpu = None class KnowValues(unittest.TestCase): def test_tddft_iter(self): \"\"\" This is", "= misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu =", "in the valence + H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4)", "= tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu = None class KnowValues(unittest.TestCase):", "water: O -- 6 electrons in the valence + H2", "electrons in the valence + H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0],", "from __future__ import print_function, division import os,unittest from pyscf.nao import", "dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test GPU version \"\"\"", "self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\"", "print_function, division import os,unittest from pyscf.nao import tddft_iter dname =", "self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6", "None class KnowValues(unittest.TestCase): def test_tddft_iter(self): \"\"\" This is iterative TDDFT", "6 electrons in the valence + H2 -- 2 electrons", "self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0]) if __name__ ==", "os,unittest from pyscf.nao import 
tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td =", "'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons", "iterative TDDFT with SIESTA starting point \"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td,", "GPU version \"\"\" if td_gpu is not None: self.assertTrue(hasattr(td_gpu, 'xocc'))", "version \"\"\" if td_gpu is not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu,", "dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try: from pyscf.lib", "\"\"\" Test GPU version \"\"\" if td_gpu is not None:", "cd=dname, GPU=True) except: td_gpu = None class KnowValues(unittest.TestCase): def test_tddft_iter(self):", "O -- 6 electrons in the valence + H2 --", "H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 =", "is iterative TDDFT with SIESTA starting point \"\"\" self.assertTrue(hasattr(td, 'xocc'))", "td_gpu is not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) #", "def test_tddft_iter_gpu(self): \"\"\" Test GPU version \"\"\" if td_gpu is", "if td_gpu is not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0)", "self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in", "= os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try: from pyscf.lib import", "TDDFT with SIESTA starting point \"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt'))", "in the valence + H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4)", "'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in the", "starting point \"\"\" self.assertTrue(hasattr(td, 'xocc')) 
self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water:", "'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in the", "tddft_iter(label='water', cd=dname) try: from pyscf.lib import misc libnao_gpu = misc.load_library(\"libnao_gpu\")", "\"\"\" This is iterative TDDFT with SIESTA starting point \"\"\"", "-- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0])", "4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0]) if __name__ == \"__main__\":", "import print_function, division import os,unittest from pyscf.nao import tddft_iter dname", "from pyscf.lib import misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water',", "\"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O --", "not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O", "= tddft_iter(label='water', cd=dname) try: from pyscf.lib import misc libnao_gpu =", "point \"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O", "import os,unittest from pyscf.nao import tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td", "cd=dname) try: from pyscf.lib import misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu", "-- 6 electrons in the valence + H2 -- 2", "tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try: from", "os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try: from pyscf.lib import misc", "Test GPU version \"\"\" if td_gpu is not None: self.assertTrue(hasattr(td_gpu,", "try: from pyscf.lib import misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu 
=", "is not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water:", "import tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname) try:", "division import os,unittest from pyscf.nao import tddft_iter dname = os.path.dirname(os.path.abspath(__file__))", "= None class KnowValues(unittest.TestCase): def test_tddft_iter(self): \"\"\" This is iterative", "\"\"\" if td_gpu is not None: self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt'))", "GPU=True) except: td_gpu = None class KnowValues(unittest.TestCase): def test_tddft_iter(self): \"\"\"", "2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0]) if", "+ H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0", "the valence + H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0],", "import misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname, GPU=True)", "valence + H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19)", "test_tddft_iter_gpu(self): \"\"\" Test GPU version \"\"\" if td_gpu is not", "electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self):", "# water: O -- 6 electrons in the valence +", "self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence", "2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0]) def", "+ H2 -- 2 electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0", "class KnowValues(unittest.TestCase): def 
test_tddft_iter(self): \"\"\" This is iterative TDDFT with", "pyscf.nao import tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water', cd=dname)", "'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6 electrons", "with SIESTA starting point \"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0)", "self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in", "td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test GPU version \"\"\" if td_gpu", "test_tddft_iter(self): \"\"\" This is iterative TDDFT with SIESTA starting point", "electrons self.assertEqual(td_gpu.xocc[0].shape[0], 4) self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0]) if __name__", "td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu = None class", "__future__ import print_function, division import os,unittest from pyscf.nao import tddft_iter", "the valence + H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0],", "4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test", "misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except:", "self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test GPU", "self.assertTrue(hasattr(td_gpu, 'xocc')) self.assertTrue(hasattr(td_gpu, 'xvrt')) self.assertTrue(td_gpu.ksn2f.sum()==8.0) # water: O -- 6", "misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu = None", "= td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test GPU version \"\"\" if", "except: td_gpu = None class KnowValues(unittest.TestCase): def 
test_tddft_iter(self): \"\"\" This", "-- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 = td.apply_rf0(td.moms1[:,0])", "electrons in the valence + H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0],", "tddft_iter(label='water', cd=dname, GPU=True) except: td_gpu = None class KnowValues(unittest.TestCase): def", "19) dn0 = td.apply_rf0(td.moms1[:,0]) def test_tddft_iter_gpu(self): \"\"\" Test GPU version", "self.assertTrue(td.ksn2f.sum()==8.0) # water: O -- 6 electrons in the valence", "from pyscf.nao import tddft_iter dname = os.path.dirname(os.path.abspath(__file__)) td = tddft_iter(label='water',", "This is iterative TDDFT with SIESTA starting point \"\"\" self.assertTrue(hasattr(td,", "H2 -- 2 electrons self.assertEqual(td.xocc[0].shape[0], 4) self.assertEqual(td.xvrt[0].shape[0], 19) dn0 =", "self.assertEqual(td_gpu.xvrt[0].shape[0], 19) dn0 = td_gpu.apply_rf0(td_gpu.moms1[:,0]) if __name__ == \"__main__\": unittest.main()", "pyscf.lib import misc libnao_gpu = misc.load_library(\"libnao_gpu\") td_gpu = tddft_iter(label='water', cd=dname,", "SIESTA starting point \"\"\" self.assertTrue(hasattr(td, 'xocc')) self.assertTrue(hasattr(td, 'xvrt')) self.assertTrue(td.ksn2f.sum()==8.0) #", "KnowValues(unittest.TestCase): def test_tddft_iter(self): \"\"\" This is iterative TDDFT with SIESTA" ]
[ "line in open(path.join(dirname, filename))) return [line for line in lines", "for line in open(path.join(dirname, filename))) return [line for line in", "in lines if line and not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27',", "OSM and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing',", "with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description = f.read() def", "and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='<NAME>',", "f.read() def parse_requirements(filename): lines = (line.strip() for line in open(path.join(dirname,", "open(path.join(dirname, filename))) return [line for line in lines if line", "io from os import path from setuptools import setup dirname", "path from setuptools import setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname,", "dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description", "version='0.1.27', license='MIT', description='Parse OSM and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry", "if line and not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse", "lines = (line.strip() for line in open(path.join(dirname, filename))) return [line", "path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description = f.read()", "keywords='geometry gis osm parsing', author='<NAME>', author_email='<EMAIL>', url='https://github.com/aspectumapp/osm2geojson', packages=['osm2geojson'], include_package_data=True, install_requires=parse_requirements(\"requirements.txt\")", "name='osm2geojson', version='0.1.27', license='MIT', 
description='Parse OSM and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown',", "os import path from setuptools import setup dirname = path.abspath(path.dirname(__file__))", "long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='<NAME>', author_email='<EMAIL>', url='https://github.com/aspectumapp/osm2geojson', packages=['osm2geojson'],", "Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='<NAME>', author_email='<EMAIL>',", "f: long_description = f.read() def parse_requirements(filename): lines = (line.strip() for", "license='MIT', description='Parse OSM and Overpass JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis", "setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f:", "filename))) return [line for line in lines if line and", "in open(path.join(dirname, filename))) return [line for line in lines if", "setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM and Overpass JSON', long_description=long_description,", "line and not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM", "parse_requirements(filename): lines = (line.strip() for line in open(path.join(dirname, filename))) return", "import setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as", "as f: long_description = f.read() def parse_requirements(filename): lines = (line.strip()", "not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM and Overpass", "and not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM 
and", "import io from os import path from setuptools import setup", "= f.read() def parse_requirements(filename): lines = (line.strip() for line in", "line in lines if line and not line.startswith(\"#\")] setup( name='osm2geojson',", "from setuptools import setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'),", "long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='<NAME>', author_email='<EMAIL>', url='https://github.com/aspectumapp/osm2geojson', packages=['osm2geojson'], include_package_data=True,", "setuptools import setup dirname = path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8')", "= (line.strip() for line in open(path.join(dirname, filename))) return [line for", "from os import path from setuptools import setup dirname =", "gis osm parsing', author='<NAME>', author_email='<EMAIL>', url='https://github.com/aspectumapp/osm2geojson', packages=['osm2geojson'], include_package_data=True, install_requires=parse_requirements(\"requirements.txt\") )", "[line for line in lines if line and not line.startswith(\"#\")]", "for line in lines if line and not line.startswith(\"#\")] setup(", "line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT', description='Parse OSM and Overpass JSON',", "import path from setuptools import setup dirname = path.abspath(path.dirname(__file__)) with", "(line.strip() for line in open(path.join(dirname, filename))) return [line for line", "JSON', long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm parsing', author='<NAME>', author_email='<EMAIL>', url='https://github.com/aspectumapp/osm2geojson',", "def parse_requirements(filename): lines = (line.strip() for line in open(path.join(dirname, filename)))", "return [line for line in lines if line and not", "description='Parse OSM and Overpass JSON', 
long_description=long_description, long_description_content_type='text/markdown', keywords='geometry gis osm", "encoding='utf-8') as f: long_description = f.read() def parse_requirements(filename): lines =", "= path.abspath(path.dirname(__file__)) with io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description =", "long_description = f.read() def parse_requirements(filename): lines = (line.strip() for line", "'README.md'), encoding='utf-8') as f: long_description = f.read() def parse_requirements(filename): lines", "io.open(path.join(dirname, 'README.md'), encoding='utf-8') as f: long_description = f.read() def parse_requirements(filename):", "lines if line and not line.startswith(\"#\")] setup( name='osm2geojson', version='0.1.27', license='MIT'," ]
[ "Precos WHERE nome_produto = ?', (nome,)) registro = cursor.fetchone() if", "as cursor: cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?',", "= cursor.fetchone() if not(registro is None): print(f'Nome: {registro[0]} | Preço:", "import closing nome = input('Nome do produto: ').lower().capitalize() with sqlite3.connect('precos.db')", "with closing(conexao.cursor()) as cursor: cursor.execute('SELECT * FROM Precos WHERE nome_produto", "closing nome = input('Nome do produto: ').lower().capitalize() with sqlite3.connect('precos.db') as", "(nome,)) registro = cursor.fetchone() if not(registro is None): print(f'Nome: {registro[0]}", "* FROM Precos WHERE nome_produto = ?', (nome,)) registro =", "cursor.fetchone() if not(registro is None): print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}')", "as conexao: with closing(conexao.cursor()) as cursor: cursor.execute('SELECT * FROM Precos", "= ?', (valor, registro[0])) if cursor.rowcount == 1: conexao.commit() print('Alteração", "valor: R$')) cursor.execute('UPDATE Precos SET preco = ? WHERE nome_produto", "input('Nome do produto: ').lower().capitalize() with sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor())", "(valor, registro[0])) if cursor.rowcount == 1: conexao.commit() print('Alteração gravada.') else:", "= float(input('Novo valor: R$')) cursor.execute('UPDATE Precos SET preco = ?", "do produto: ').lower().capitalize() with sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor()) as", "not(registro is None): print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}') valor =", "float(input('Novo valor: R$')) cursor.execute('UPDATE Precos SET preco = ? WHERE", "preco = ? WHERE nome_produto = ?', (valor, registro[0])) if", "? 
WHERE nome_produto = ?', (valor, registro[0])) if cursor.rowcount ==", "conexao.commit() print('Alteração gravada.') else: conexao.rollback() print('Alteração abortada.') else: print(f'Produto {nome}", "print('Alteração gravada.') else: conexao.rollback() print('Alteração abortada.') else: print(f'Produto {nome} não", "conexao: with closing(conexao.cursor()) as cursor: cursor.execute('SELECT * FROM Precos WHERE", "if not(registro is None): print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}') valor", "= ? WHERE nome_produto = ?', (valor, registro[0])) if cursor.rowcount", "valor = float(input('Novo valor: R$')) cursor.execute('UPDATE Precos SET preco =", "?', (nome,)) registro = cursor.fetchone() if not(registro is None): print(f'Nome:", "is None): print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}') valor = float(input('Novo", "cursor.rowcount == 1: conexao.commit() print('Alteração gravada.') else: conexao.rollback() print('Alteração abortada.')", "R$')) cursor.execute('UPDATE Precos SET preco = ? 
WHERE nome_produto =", "contextlib import closing nome = input('Nome do produto: ').lower().capitalize() with", "closing(conexao.cursor()) as cursor: cursor.execute('SELECT * FROM Precos WHERE nome_produto =", "WHERE nome_produto = ?', (valor, registro[0])) if cursor.rowcount == 1:", "| Preço: R${registro[1]:.2f}') valor = float(input('Novo valor: R$')) cursor.execute('UPDATE Precos", "== 1: conexao.commit() print('Alteração gravada.') else: conexao.rollback() print('Alteração abortada.') else:", "import sqlite3 from contextlib import closing nome = input('Nome do", "cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,)) registro", "= input('Nome do produto: ').lower().capitalize() with sqlite3.connect('precos.db') as conexao: with", "Preço: R${registro[1]:.2f}') valor = float(input('Novo valor: R$')) cursor.execute('UPDATE Precos SET", "print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}') valor = float(input('Novo valor: R$'))", "cursor: cursor.execute('SELECT * FROM Precos WHERE nome_produto = ?', (nome,))", "SET preco = ? WHERE nome_produto = ?', (valor, registro[0]))", "cursor.execute('UPDATE Precos SET preco = ? 
WHERE nome_produto = ?',", "nome = input('Nome do produto: ').lower().capitalize() with sqlite3.connect('precos.db') as conexao:", "from contextlib import closing nome = input('Nome do produto: ').lower().capitalize()", "1: conexao.commit() print('Alteração gravada.') else: conexao.rollback() print('Alteração abortada.') else: print(f'Produto", "gravada.') else: conexao.rollback() print('Alteração abortada.') else: print(f'Produto {nome} não encontrado.')", "registro[0])) if cursor.rowcount == 1: conexao.commit() print('Alteração gravada.') else: conexao.rollback()", "').lower().capitalize() with sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor()) as cursor: cursor.execute('SELECT", "R${registro[1]:.2f}') valor = float(input('Novo valor: R$')) cursor.execute('UPDATE Precos SET preco", "?', (valor, registro[0])) if cursor.rowcount == 1: conexao.commit() print('Alteração gravada.')", "if cursor.rowcount == 1: conexao.commit() print('Alteração gravada.') else: conexao.rollback() print('Alteração", "sqlite3 from contextlib import closing nome = input('Nome do produto:", "produto: ').lower().capitalize() with sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor()) as cursor:", "nome_produto = ?', (nome,)) registro = cursor.fetchone() if not(registro is", "= ?', (nome,)) registro = cursor.fetchone() if not(registro is None):", "registro = cursor.fetchone() if not(registro is None): print(f'Nome: {registro[0]} |", "nome_produto = ?', (valor, registro[0])) if cursor.rowcount == 1: conexao.commit()", "{registro[0]} | Preço: R${registro[1]:.2f}') valor = float(input('Novo valor: R$')) cursor.execute('UPDATE", "with sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor()) as cursor: cursor.execute('SELECT *", "FROM Precos WHERE nome_produto = ?', (nome,)) registro = cursor.fetchone()", "Precos SET preco = ? 
WHERE nome_produto = ?', (valor,", "None): print(f'Nome: {registro[0]} | Preço: R${registro[1]:.2f}') valor = float(input('Novo valor:", "WHERE nome_produto = ?', (nome,)) registro = cursor.fetchone() if not(registro", "sqlite3.connect('precos.db') as conexao: with closing(conexao.cursor()) as cursor: cursor.execute('SELECT * FROM" ]
[ "not None: eq = self.eq.float() else: eq = None obj", "elif obj_b is not None: obj = LinearObjective(obj_b,obj_c) if le", "None if self.eq is not None: eq = self.eq.float() else:", "and eq_b.size(0) == 1: eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b)", "eq_b.ndim == 2 and eq_b.size(0) == 1: eq_b = eq_b.squeeze(0)", "if status != OPTIMAL: logger.warning(\"optimal not found, status:%s\",status) for post", "eq_A,eq_b = [convert(x) for x in eq] if eq_b.ndim ==", "return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is not None: self.le.to(device)", "self.le is not None: self.le.to(device) else: le = None if", "= eq_cons self.vars = _vars self.n = len(_vars) @classmethod def", "self.eq is not None: self.eq.to(device) else: eq = None obj", "self.le is not None: le = self.le.float() else: le =", "import copy from jet20.backend.constraints import * from jet20.backend.obj import *", "p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def", "self.eq is not None: eq = self.eq.double() else: eq =", "= None if self.eq is not None: eq = self.eq.double()", "len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x is not", "self.pres: start = time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time", "_vars self.x = x self.duals = None def __str__(self): return", "= obj_value self.vars = _vars self.x = x self.duals =", "def convert(x): if x is not None: if isinstance(x,torch.Tensor): return", "= QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None: obj = LinearObjective(obj_b,obj_c)", "is not None: le = self.le.float() else: le = None", "None: x = torch.zeros(p.n).float().to(config.device) start = time.time() p_f32 = p.float()", "if isinstance(duals,(tuple,list)): duals = [d.double() for d in duals] 
else:", "time import copy from jet20.backend.constraints import * from jet20.backend.obj import", "isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else: return None if", "duals] else: duals = duals.double() if status == SUB_OPTIMAL: start", "eq_b.size(0) == 1: eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return", "x = x.double() if isinstance(duals,(tuple,list)): duals = [d.double() for d", "self.obj = obj self.le = le_cons self.eq = eq_cons self.vars", "= [] self.posts = [] def solve(self,p,config,x=None): for pre in", "time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres): self.pres.extend(pres) def register_posts(self,*posts): self.posts.extend(posts)", "import * from jet20.backend.obj import * from jet20.backend.config import *", "None: eq_A,eq_b = [convert(x) for x in eq] if eq_b.ndim", "(self.obj_value,self.vars) __repr__ = __str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj =", "status == SUB_OPTIMAL: start = time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision", "logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if status == SUB_OPTIMAL: start =", "from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x is not None: if isinstance(x,torch.Tensor):", "and le_b.size(0) == 1: le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b)", "for post in self.posts: start = time.time() p,x = post.postprocess(p,x,config)", "self.eq.float() else: eq = None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq)", "solve(self,p,config,x=None): for pre in self.pres: start = time.time() p,x =", "None: le = self.le.float() else: le = None if self.eq", "if eq_b.ndim == 2 and eq_b.size(0) == 1: eq_b =", "None if self.eq is not None: eq = 
self.eq.double() else:", "start = time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start)", "def solve(self,p,config,x=None): for pre in self.pres: start = time.time() p,x", "eq is not None: eq_A,eq_b = [convert(x) for x in", "logger.debug(\"fast mode, time used:%s\",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)): duals", "= [d.double() for d in duals] else: duals = duals.double()", "if self.le is not None: self.le.to(device) else: le = None", "p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if x is", "from jet20.backend.obj import * from jet20.backend.config import * from jet20.backend.core", "{ var: v.item() for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class", "== SUB_OPTIMAL: start = time.time() # p = p.double() x,_,status,duals", "not None: obj_Q,obj_b,obj_c = [convert(x) for x in obj] if", "is not None: obj_Q,obj_b,obj_c = [convert(x) for x in obj]", "return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres = [] self.posts", "if self.eq is not None: eq = self.eq.float() else: eq", "duals = [d.double() for d in duals] else: duals =", "= solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start) if status != OPTIMAL:", "else: eq = None obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def", "time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if x", "self.__class__(self.vars,obj,le,eq) def double(self): if self.le is not None: le =", "= self.eq.float() else: eq = None obj = self.obj.float() return", "if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else: return None", 
"self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is not None:", "found, status:%s\",status) for post in self.posts: start = time.time() p,x", "= self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is not", "is not None: obj = LinearObjective(obj_b,obj_c) if le is not", "Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres = [] self.posts =", "<filename>jet20/backend/solver.py<gh_stars>1-10 import torch import time import copy from jet20.backend.constraints import", "def __str__(self): return \"obj_value: %s vars:%s\" % (self.obj_value,self.vars) __repr__ =", "duals = duals.double() if status == SUB_OPTIMAL: start = time.time()", "logger.debug(\"precision mode, time used:%s\",time.time()-start) if status != OPTIMAL: logger.warning(\"optimal not", "le = self.le.float() else: le = None if self.eq is", "= time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if", "p_f32 = p.float() x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast", "self.duals = None def __str__(self): return \"obj_value: %s vars:%s\" %", "from jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging", "name:%s, time used:%s\",pre.name(),time.time()-start) if x is None: x = torch.zeros(p.n).float().to(config.device)", "not None: le = self.le.float() else: le = None if", "Solver(object): def __init__(self): self.pres = [] self.posts = [] def", "\"obj_value: %s vars:%s\" % (self.obj_value,self.vars) __repr__ = __str__ class Problem(object):", "= x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start) x", "float(self): if self.le is not None: le = self.le.float() else:", "start = time.time() x,_,status,duals = 
solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start)", "None obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le", "not None: eq_A,eq_b = [convert(x) for x in eq] if", "solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals):", "= self.le.double() else: le = None if self.eq is not", "not None: obj = LinearObjective(obj_b,obj_c) if le is not None:", "Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le = le_cons self.eq", "= solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start) x = x.double() if", "duals.double() if status == SUB_OPTIMAL: start = time.time() # p", "in eq] if eq_b.ndim == 2 and eq_b.size(0) == 1:", "import time import copy from jet20.backend.constraints import * from jet20.backend.obj", "self.vars = _vars self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def", "self.eq.to(device) else: eq = None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq)", "x in obj] if obj_Q is not None: obj =", "le_b.ndim == 2 and le_b.size(0) == 1: le_b = le_b.squeeze(0)", "== 1: le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq", "solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if status == SUB_OPTIMAL: start", "else: eq = None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def", "[] self.posts = [] def solve(self,p,config,x=None): for pre in self.pres:", "class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value = obj_value", "if le_b.ndim == 2 and le_b.size(0) == 1: le_b =", "is None: x = torch.zeros(p.n).float().to(config.device) start 
= time.time() p_f32 =", "def __init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value = obj_value self.vars =", "not found, status:%s\",status) for post in self.posts: start = time.time()", "None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None: obj", "import logging logger = logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status", "x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start) x = x.double()", "jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger", "self.posts = [] def solve(self,p,config,x=None): for pre in self.pres: start", "status self.obj_value = obj_value self.vars = _vars self.x = x", "post in self.posts: start = time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing", "self.le is not None: le = self.le.double() else: le =", "logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if x is None: x =", "SUB_OPTIMAL: start = time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time", "eq = None obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device):", "obj_value self.vars = _vars self.x = x self.duals = None", "return cls(_vars,obj,le,eq) def float(self): if self.le is not None: le", "= [] def solve(self,p,config,x=None): for pre in self.pres: start =", "torch.tensor(x,dtype=dtype,device=device) else: return None if obj is not None: obj_Q,obj_b,obj_c", "* from jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import", "= LinearLeConstraints(le_A,le_b) if eq is not None: eq_A,eq_b = [convert(x)", "for x in obj] if obj_Q is not None: obj", "self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if self.le is not None:", 
"__str__(self): return \"obj_value: %s vars:%s\" % (self.obj_value,self.vars) __repr__ = __str__", "if self.eq is not None: eq = self.eq.double() else: eq", "== 2 and le_b.size(0) == 1: le_b = le_b.squeeze(0) le", "self.eq is not None: eq = self.eq.float() else: eq =", "LinearLeConstraints(le_A,le_b) if eq is not None: eq_A,eq_b = [convert(x) for", "self.le.double() else: le = None if self.eq is not None:", "__init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value = obj_value self.vars = _vars", "time used:%s\",time.time()-start) if status != OPTIMAL: logger.warning(\"optimal not found, status:%s\",status)", "is not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device)", "== 2 and eq_b.size(0) == 1: eq_b = eq_b.squeeze(0) eq", "not None: self.le.to(device) else: le = None if self.eq is", "cls(_vars,obj,le,eq) def float(self): if self.le is not None: le =", "pre in self.pres: start = time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing", "= None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars", "pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if x is None: x", "obj_Q is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is", "obj = LinearObjective(obj_b,obj_c) if le is not None: le_A,le_b =", "= None if self.eq is not None: eq = self.eq.float()", "self.vars = _vars self.x = x self.duals = None def", "import torch import time import copy from jet20.backend.constraints import *", "torch import time import copy from jet20.backend.constraints import * from", "= _vars self.x = x self.duals = None def __str__(self):", "__init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le = le_cons self.eq = eq_cons", "= le_cons self.eq = eq_cons self.vars = _vars self.n =", "= time.time() # p = 
p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision", "not None: self.eq.to(device) else: eq = None obj = self.obj.to(device)", "used:%s\",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)): duals = [d.double() for", "d in duals] else: duals = duals.double() if status ==", "self.posts: start = time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time", "eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if self.le", "= [convert(x) for x in le] if le_b.ndim == 2", "in obj] if obj_Q is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c)", "le] if le_b.ndim == 2 and le_b.size(0) == 1: le_b", "return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars = { var: v.item() for", "= None def __str__(self): return \"obj_value: %s vars:%s\" % (self.obj_value,self.vars)", "used:%s\",time.time()-start) if status != OPTIMAL: logger.warning(\"optimal not found, status:%s\",status) for", "[convert(x) for x in eq] if eq_b.ndim == 2 and", "p = p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start)", "le_cons self.eq = eq_cons self.vars = _vars self.n = len(_vars)", "self.status = status self.obj_value = obj_value self.vars = _vars self.x", "import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__) class Solution(object): def", "[convert(x) for x in obj] if obj_Q is not None:", "x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start)", "jet20.backend.constraints import * from jet20.backend.obj import * from jet20.backend.config import", "status != OPTIMAL: logger.warning(\"optimal not found, status:%s\",status) for post in", "not None: le = self.le.double() else: le = None if", "x.type(dtype).to(device) else: 
return torch.tensor(x,dtype=dtype,device=device) else: return None if obj is", "le = None if self.eq is not None: self.eq.to(device) else:", "if x is None: x = torch.zeros(p.n).float().to(config.device) start = time.time()", "mode, time used:%s\",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)): duals =", "logger.warning(\"optimal not found, status:%s\",status) for post in self.posts: start =", "eq = self.eq.double() else: eq = None obj = self.obj.double()", "None: self.le.to(device) else: le = None if self.eq is not", "v.item() for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def", "solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start) if status != OPTIMAL: logger.warning(\"optimal", "= le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq is not None:", "= status self.obj_value = obj_value self.vars = _vars self.x =", "torch.zeros(p.n).float().to(config.device) start = time.time() p_f32 = p.float() x = x.float()", "None: eq = self.eq.double() else: eq = None obj =", "return \"obj_value: %s vars:%s\" % (self.obj_value,self.vars) __repr__ = __str__ class", "not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else:", "2 and le_b.size(0) == 1: le_b = le_b.squeeze(0) le =", "None if obj is not None: obj_Q,obj_b,obj_c = [convert(x) for", "convert(x): if x is not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device)", "vars:%s\" % (self.obj_value,self.vars) __repr__ = __str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None):", "if status == SUB_OPTIMAL: start = time.time() # p =", "= time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start) if", "self.__class__(self.vars,obj,le,eq) def 
build_solution(self,x,obj_value,status,duals): _vars = { var: v.item() for var,v", "= solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if status == SUB_OPTIMAL:", "time used:%s\",pre.name(),time.time()-start) if x is None: x = torch.zeros(p.n).float().to(config.device) start", "__str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le =", "if self.le is not None: le = self.le.double() else: le", "return torch.tensor(x,dtype=dtype,device=device) else: return None if obj is not None:", "obj_Q,obj_b,obj_c = [convert(x) for x in obj] if obj_Q is", "else: return torch.tensor(x,dtype=dtype,device=device) else: return None if obj is not", "* from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__)", "for x in eq] if eq_b.ndim == 2 and eq_b.size(0)", "time used:%s\",time.time()-start) if status == SUB_OPTIMAL: start = time.time() x,_,status,duals", "mode, time used:%s\",time.time()-start) if status != OPTIMAL: logger.warning(\"optimal not found,", "# p = p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time", "!= OPTIMAL: logger.warning(\"optimal not found, status:%s\",status) for post in self.posts:", "@classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x is not None:", "used:%s\",time.time()-start) if status == SUB_OPTIMAL: start = time.time() x,_,status,duals =", "% (self.obj_value,self.vars) __repr__ = __str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj", "class Solver(object): def __init__(self): self.pres = [] self.posts = []", "x is None: x = torch.zeros(p.n).float().to(config.device) start = time.time() p_f32", "= None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if", "= 
__str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le", "= self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars = { var:", "is not None: eq = self.eq.double() else: eq = None", "if x is not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else:", "from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__) class", "for d in duals] else: duals = duals.double() if status", "x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if status ==", "x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start) x =", "OPTIMAL: logger.warning(\"optimal not found, status:%s\",status) for post in self.posts: start", "return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else: return None if obj", "jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger = logging.getLogger(__name__) class Solution(object):", "self.pres = [] self.posts = [] def solve(self,p,config,x=None): for pre", "= self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if self.le is not", "var: v.item() for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object):", "2 and eq_b.size(0) == 1: eq_b = eq_b.squeeze(0) eq =", "1: eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def", "__repr__ = __str__ class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj", "x is not None: if isinstance(x,torch.Tensor): return x.type(dtype).to(device) else: return", "return self.__class__(self.vars,obj,le,eq) def double(self): if self.le is not None: le", "not None: eq = 
self.eq.double() else: eq = None obj", "eq = None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals):", "None: le = self.le.double() else: le = None if self.eq", "= torch.zeros(p.n).float().to(config.device) start = time.time() p_f32 = p.float() x =", "= x self.duals = None def __str__(self): return \"obj_value: %s", "obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is", "= len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x is", "is not None: le_A,le_b = [convert(x) for x in le]", "def __init__(self): self.pres = [] self.posts = [] def solve(self,p,config,x=None):", "= obj self.le = le_cons self.eq = eq_cons self.vars =", "start = time.time() p_f32 = p.float() x = x.float() x,_,status,duals", "not None: le_A,le_b = [convert(x) for x in le] if", "_vars = { var: v.item() for var,v in zip(self.vars,x)} return", "= LinearObjective(obj_b,obj_c) if le is not None: le_A,le_b = [convert(x)", "* from jet20.backend.obj import * from jet20.backend.config import * from", "self.le = le_cons self.eq = eq_cons self.vars = _vars self.n", "is not None: eq_A,eq_b = [convert(x) for x in eq]", "obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None: obj =", "= logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value", "le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq is not None: eq_A,eq_b", "if eq is not None: eq_A,eq_b = [convert(x) for x", "import * from jet20.backend.config import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED", "time used:%s\",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)): duals = [d.double()", "if obj_Q is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b", "Solution(object): def 
__init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value = obj_value self.vars", "def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x is not None: if", "QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None: obj = LinearObjective(obj_b,obj_c) if", "is not None: eq = self.eq.float() else: eq = None", "eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if self.le is", "obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if self.le is", "in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres =", "obj self.le = le_cons self.eq = eq_cons self.vars = _vars", "1: le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq is", "status:%s\",status) for post in self.posts: start = time.time() p,x =", "[] def solve(self,p,config,x=None): for pre in self.pres: start = time.time()", "else: duals = duals.double() if status == SUB_OPTIMAL: start =", "LinearObjective(obj_b,obj_c) if le is not None: le_A,le_b = [convert(x) for", "le is not None: le_A,le_b = [convert(x) for x in", "if self.le is not None: le = self.le.float() else: le", "jet20.backend.obj import * from jet20.backend.config import * from jet20.backend.core import", "self.eq = eq_cons self.vars = _vars self.n = len(_vars) @classmethod", "if obj is not None: obj_Q,obj_b,obj_c = [convert(x) for x", "self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars = { var: v.item()", "return None if obj is not None: obj_Q,obj_b,obj_c = [convert(x)", "solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time used:%s\",time.time()-start) x = x.double() if isinstance(duals,(tuple,list)):", "to(self,device): if self.le is not None: self.le.to(device) else: le =", "in le] if le_b.ndim == 2 and le_b.size(0) == 1:", 
"= { var: v.item() for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals)", "logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status = status self.obj_value =", "SUB_OPTIMAL: start = time.time() # p = p.double() x,_,status,duals =", "== SUB_OPTIMAL: start = time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode,", "var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres", "_vars self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if", "le = LinearLeConstraints(le_A,le_b) if eq is not None: eq_A,eq_b =", "logging logger = logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status =", "= eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if", "in duals] else: duals = duals.double() if status == SUB_OPTIMAL:", "p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if status", "= None obj = self.obj.double() return self.__class__(self.vars,obj,le,eq) def to(self,device): if", "class Problem(object): def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le = le_cons", "def double(self): if self.le is not None: le = self.le.double()", "obj] if obj_Q is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif", "p.float() x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode, time", "for var,v in zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self):", "x.double() if isinstance(duals,(tuple,list)): duals = [d.double() for 
d in duals]", "= x.double() if isinstance(duals,(tuple,list)): duals = [d.double() for d in", "None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self): if self.le", "self.le.to(device) else: le = None if self.eq is not None:", "obj_b is not None: obj = LinearObjective(obj_b,obj_c) if le is", "else: eq = None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def", "= self.eq.double() else: eq = None obj = self.obj.double() return", "eq] if eq_b.ndim == 2 and eq_b.size(0) == 1: eq_b", "in self.posts: start = time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s,", "time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start) if status", "def to(self,device): if self.le is not None: self.le.to(device) else: le", "obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars = {", "None: self.eq.to(device) else: eq = None obj = self.obj.to(device) return", "self.__class__(self.vars,obj,le,eq) def to(self,device): if self.le is not None: self.le.to(device) else:", "self.obj_value = obj_value self.vars = _vars self.x = x self.duals", "le = None if self.eq is not None: eq =", "time.time() # p = p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode,", "%s vars:%s\" % (self.obj_value,self.vars) __repr__ = __str__ class Problem(object): def", "x,_,status,duals = solve(p,x,config,fast=False,duals=duals) logger.debug(\"precision mode, time used:%s\",time.time()-start) if status !=", "logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres): self.pres.extend(pres) def", "eq = None obj = self.obj.float() return self.__class__(self.vars,obj,le,eq) def double(self):", "None: if isinstance(x,torch.Tensor): 
return x.type(dtype).to(device) else: return torch.tensor(x,dtype=dtype,device=device) else: return", "= time.time() p_f32 = p.float() x = x.float() x,_,status,duals =", "for pre in self.pres: start = time.time() p,x = pre.preprocess(p,x,config)", "= _vars self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x):", "is not None: le = self.le.double() else: le = None", "x in eq] if eq_b.ndim == 2 and eq_b.size(0) ==", "eq = self.eq.float() else: eq = None obj = self.obj.float()", "self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64): def convert(x): if x", "from jet20.backend.constraints import * from jet20.backend.obj import * from jet20.backend.config", "None: obj_Q,obj_b,obj_c = [convert(x) for x in obj] if obj_Q", "= None if self.eq is not None: self.eq.to(device) else: eq", "le_A,le_b = [convert(x) for x in le] if le_b.ndim ==", "= p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals) logger.debug(\"fast-precision mode, time used:%s\",time.time()-start) if", "__init__(self): self.pres = [] self.posts = [] def solve(self,p,config,x=None): for", "if self.eq is not None: self.eq.to(device) else: eq = None", "logger = logging.getLogger(__name__) class Solution(object): def __init__(self,x,_vars,obj_value,status,duals): self.status = status", "LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if self.le is not None:", "copy from jet20.backend.constraints import * from jet20.backend.obj import * from", "= p.float() x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True) logger.debug(\"fast mode,", "else: le = None if self.eq is not None: self.eq.to(device)", "isinstance(duals,(tuple,list)): duals = [d.double() for d in duals] else: duals", "build_solution(self,x,obj_value,status,duals): _vars = { var: v.item() for var,v in zip(self.vars,x)}", "x in 
le] if le_b.ndim == 2 and le_b.size(0) ==", "eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self):", "import * from jet20.backend.core import solve,OPTIMAL,SUB_OPTIMAL,USER_STOPPED import logging logger =", "x self.duals = None def __str__(self): return \"obj_value: %s vars:%s\"", "= pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s, time used:%s\",pre.name(),time.time()-start) if x is None:", "zip(self.vars,x)} return Solution(x.cpu().numpy(),_vars,obj_value.item(),status,duals) class Solver(object): def __init__(self): self.pres = []", "else: return None if obj is not None: obj_Q,obj_b,obj_c =", "double(self): if self.le is not None: le = self.le.double() else:", "None def __str__(self): return \"obj_value: %s vars:%s\" % (self.obj_value,self.vars) __repr__", "def build_solution(self,x,obj_value,status,duals): _vars = { var: v.item() for var,v in", "start = time.time() # p = p.double() x,_,status,duals = solve(p,x,config,fast=True,duals=duals)", "eq_cons self.vars = _vars self.n = len(_vars) @classmethod def from_numpy(cls,_vars,obj=None,le=None,eq=None,device=torch.device(\"cpu\"),dtype=torch.float64):", "def __init__(self,_vars,obj,le_cons=None,eq_cons=None): self.obj = obj self.le = le_cons self.eq =", "= post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres):", "le_b.size(0) == 1: le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if", "if status == SUB_OPTIMAL: start = time.time() x,_,status,duals = solve(p,x,config,fast=False,duals=duals)", "= time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return", "None if self.eq is not None: self.eq.to(device) else: eq =", "x = torch.zeros(p.n).float().to(config.device) start = time.time() p_f32 = p.float() x", "None: le_A,le_b = 
[convert(x) for x in le] if le_b.ndim", "None obj = self.obj.to(device) return self.__class__(self.vars,obj,le,eq) def build_solution(self,x,obj_value,status,duals): _vars =", "obj is not None: obj_Q,obj_b,obj_c = [convert(x) for x in", "used:%s\",pre.name(),time.time()-start) if x is None: x = torch.zeros(p.n).float().to(config.device) start =", "[d.double() for d in duals] else: duals = duals.double() if", "is not None: self.eq.to(device) else: eq = None obj =", "= [convert(x) for x in obj] if obj_Q is not", "self.x = x self.duals = None def __str__(self): return \"obj_value:", "[convert(x) for x in le] if le_b.ndim == 2 and", "def float(self): if self.le is not None: le = self.le.float()", "le_b = le_b.squeeze(0) le = LinearLeConstraints(le_A,le_b) if eq is not", "None: obj = LinearObjective(obj_b,obj_c) if le is not None: le_A,le_b", "for x in le] if le_b.ndim == 2 and le_b.size(0)", "= self.le.float() else: le = None if self.eq is not", "post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres): self.pres.extend(pres)", "not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not None:", "mode, time used:%s\",time.time()-start) if status == SUB_OPTIMAL: start = time.time()", "status == SUB_OPTIMAL: start = time.time() # p = p.double()", "start = time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start)", "le = self.le.double() else: le = None if self.eq is", "name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals) def register_pres(self,*pres): self.pres.extend(pres) def register_posts(self,*posts):", "if le is not None: le_A,le_b = [convert(x) for x", "= duals.double() if status == SUB_OPTIMAL: start = time.time() #", "self.eq.double() else: eq = None obj = self.obj.double() return 
self.__class__(self.vars,obj,le,eq)", "time.time() p,x = post.postprocess(p,x,config) logger.debug(\"postprocessing name:%s, time used:%s\",post.name(),time.time()-start) return p.build_solution(x,p.obj(x),status,duals)", "in self.pres: start = time.time() p,x = pre.preprocess(p,x,config) logger.debug(\"preprocessing name:%s,", "= LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq) def float(self): if self.le is not", "== 1: eq_b = eq_b.squeeze(0) eq = LinearEqConstraints(eq_A,eq_b) return cls(_vars,obj,le,eq)", "is not None: obj = QuadraticObjective(obj_Q,obj_b,obj_c) elif obj_b is not", "= [convert(x) for x in eq] if eq_b.ndim == 2", "self.le.float() else: le = None if self.eq is not None:", "else: le = None if self.eq is not None: eq", "None: eq = self.eq.float() else: eq = None obj =", "time.time() p_f32 = p.float() x = x.float() x,_,status,duals = solve(p_f32,x,config,fast=True)", "is not None: self.le.to(device) else: le = None if self.eq" ]
[ "raw['img']) pad = Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw',", "as np import cv2 def test_pad(): raw = dict( img=np.zeros((200,", "from mmdet.datasets.pipelines.transforms import FilterBox import numpy as np import cv2", "import FilterBox import numpy as np import cv2 def test_pad():", "= Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey()", "r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box(): bboxes", "dict( img=np.zeros((402, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad =", "20, 20], [10, 10, 19, 20], [10, 10, 20, 19],", "r['img']) cv2.waitKey() raw = dict( img=np.zeros((402, 401, 3), dtype=np.uint8) )", "np import cv2 def test_pad(): raw = dict( img=np.zeros((200, 401,", "= dict( img=np.zeros((402, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad", "[10, 10, 20, 20], [10, 10, 19, 20], [10, 10,", "pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box():", "torch from mmdet.datasets.pipelines.transforms import Pad from mmdet.datasets.pipelines.transforms import FilterBox import", "10, 10], [10, 10, 20, 20], [10, 10, 19, 20],", "result = dict(gt_bboxes=bboxes) fb = FilterBox((10, 10)) fb(result) if __name__", "raw = dict( img=np.zeros((402, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img'])", "fb = FilterBox((10, 10)) fb(result) if __name__ == '__main__': #", "import numpy as np import cv2 def test_pad(): raw =", "from mmdet.datasets.pipelines.transforms import Pad from mmdet.datasets.pipelines.transforms import FilterBox import numpy", "3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad = Pad(square=True, pad_val=255) r", "def test_pad(): raw = dict( img=np.zeros((200, 401, 3), dtype=np.uint8) )", "Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw", "= pad(raw) 
print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box(): bboxes =", "Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def", "10, 20, 19], [10, 10, 19, 19]]) gt_bboxes = np.array([[0,", "9]]) result = dict(gt_bboxes=bboxes) fb = FilterBox((10, 10)) fb(result) if", "20], [10, 10, 20, 19], [10, 10, 19, 19]]) gt_bboxes", "cv2.waitKey() raw = dict( img=np.zeros((402, 401, 3), dtype=np.uint8) ) cv2.imshow('raw',", "[10, 10, 19, 20], [10, 10, 20, 19], [10, 10,", "cv2.imshow('raw', raw['img']) pad = Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape)", "np.array([[0, 0, 10, 9]]) result = dict(gt_bboxes=bboxes) fb = FilterBox((10,", "print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box(): bboxes = np.array([[0, 0,", "bboxes = np.array([[0, 0, 10, 10], [10, 10, 20, 20],", "FilterBox import numpy as np import cv2 def test_pad(): raw", "FilterBox((10, 10)) fb(result) if __name__ == '__main__': # test_pad() test_filter_box()", "19], [10, 10, 19, 19]]) gt_bboxes = np.array([[0, 0, 10,", "pad = Pad(square=True, pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img'])", "np.array([[0, 0, 10, 10], [10, 10, 20, 20], [10, 10,", "cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box(): bboxes = np.array([[0, 0, 10,", "mmdet.datasets.pipelines.transforms import FilterBox import numpy as np import cv2 def", "10], [10, 10, 20, 20], [10, 10, 19, 20], [10,", "dict( img=np.zeros((200, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad =", "dict(gt_bboxes=bboxes) fb = FilterBox((10, 10)) fb(result) if __name__ == '__main__':", "= dict( img=np.zeros((200, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad", "19]]) gt_bboxes = np.array([[0, 0, 10, 9]]) result = dict(gt_bboxes=bboxes)", "pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw = dict( img=np.zeros((402, 401,", "10, 19, 19]]) 
gt_bboxes = np.array([[0, 0, 10, 9]]) result", "cv2.waitKey() def test_filter_box(): bboxes = np.array([[0, 0, 10, 10], [10,", "401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad = Pad(square=True, pad_val=255)", "= dict(gt_bboxes=bboxes) fb = FilterBox((10, 10)) fb(result) if __name__ ==", "numpy as np import cv2 def test_pad(): raw = dict(", "pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() def test_filter_box(): bboxes = np.array([[0,", "= np.array([[0, 0, 10, 10], [10, 10, 20, 20], [10,", "20, 19], [10, 10, 19, 19]]) gt_bboxes = np.array([[0, 0,", "cv2 def test_pad(): raw = dict( img=np.zeros((200, 401, 3), dtype=np.uint8)", "= np.array([[0, 0, 10, 9]]) result = dict(gt_bboxes=bboxes) fb =", "import torch from mmdet.datasets.pipelines.transforms import Pad from mmdet.datasets.pipelines.transforms import FilterBox", "0, 10, 10], [10, 10, 20, 20], [10, 10, 19,", "= FilterBox((10, 10)) fb(result) if __name__ == '__main__': # test_pad()", "[10, 10, 20, 19], [10, 10, 19, 19]]) gt_bboxes =", "test_filter_box(): bboxes = np.array([[0, 0, 10, 10], [10, 10, 20,", "10, 20, 20], [10, 10, 19, 20], [10, 10, 20,", "10, 19, 20], [10, 10, 20, 19], [10, 10, 19,", "def test_filter_box(): bboxes = np.array([[0, 0, 10, 10], [10, 10,", "img=np.zeros((200, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad = Pad(square=True,", "import cv2 def test_pad(): raw = dict( img=np.zeros((200, 401, 3),", "img=np.zeros((402, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad = Pad(square=True,", "10, 9]]) result = dict(gt_bboxes=bboxes) fb = FilterBox((10, 10)) fb(result)", "Pad from mmdet.datasets.pipelines.transforms import FilterBox import numpy as np import", "dtype=np.uint8) ) cv2.imshow('raw', raw['img']) pad = Pad(square=True, pad_val=255) r =", "0, 10, 9]]) result = dict(gt_bboxes=bboxes) fb = FilterBox((10, 10))", ") cv2.imshow('raw', raw['img']) pad = Pad(square=True, pad_val=255) r = pad(raw)", "r['img']) cv2.waitKey() def 
test_filter_box(): bboxes = np.array([[0, 0, 10, 10],", "r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw = dict(", "19, 20], [10, 10, 20, 19], [10, 10, 19, 19]])", "print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw = dict( img=np.zeros((402, 401, 3),", "cv2.imshow('draw', r['img']) cv2.waitKey() raw = dict( img=np.zeros((402, 401, 3), dtype=np.uint8)", "20], [10, 10, 19, 20], [10, 10, 20, 19], [10,", "gt_bboxes = np.array([[0, 0, 10, 9]]) result = dict(gt_bboxes=bboxes) fb", "19, 19]]) gt_bboxes = np.array([[0, 0, 10, 9]]) result =", "pad_val=255) r = pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw =", "[10, 10, 19, 19]]) gt_bboxes = np.array([[0, 0, 10, 9]])", "raw = dict( img=np.zeros((200, 401, 3), dtype=np.uint8) ) cv2.imshow('raw', raw['img'])", "test_pad(): raw = dict( img=np.zeros((200, 401, 3), dtype=np.uint8) ) cv2.imshow('raw',", "mmdet.datasets.pipelines.transforms import Pad from mmdet.datasets.pipelines.transforms import FilterBox import numpy as", "= pad(raw) print(r['img'].shape) cv2.imshow('draw', r['img']) cv2.waitKey() raw = dict( img=np.zeros((402,", "import Pad from mmdet.datasets.pipelines.transforms import FilterBox import numpy as np" ]
[ "_mock_merge_kw_dict # Prepare a mock context fake_context = FakeContext() def", "'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict", "test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\"", "assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list)", "import lumberyard_modules import unittest import pytest import utils class FakeContext(object):", "scenario: Test the merge_kw_dict when only platform is set and", "when the platform + configuration is set, and the configuration", "= test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a mock", "lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict == expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases,", "# Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform),", "def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context,", "test_dict={}, additional_aliases={}): self.mock_json_map = {'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path',", "the sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test',", "dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert 
isinstance(kw['libpath'], list) assert kw['libpath'][0]", "'path', additional_aliases) assert test.dict == expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key,", "a mock include settings object test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target,", "# remove or modify any license notices. This file is", "passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name),", "fake_context = FakeContext() def _mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file =", "test and a server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name", "def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map = {'path':", "base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context =", "'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org',", "and the configuration is a test but not a server", "= old_parse_json_file @pytest.fixture() def fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw", "test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\"", "licensors. 
# # For complete copyright and license terms please", "fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file,", "test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None)", "+ configuration is set, and the configuration is a server", "self.mock_json_map[path] def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map =", "is_server=False) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform,", "test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context", "fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict)", "FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None): self.base_config = base_config self.name =", "{}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False}, {},", "Do not # remove or modify any license notices. 
This", "mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map = {'path': {}} def _mock_parse_json(path,", "pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target',", "test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup a project settings", "object test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key]", "self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {}", "configuration=test_configuration) # Validate all the sections passed to the merge_kw_dict", "{'client_only': False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def", "self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "utils.parse_json_file = old_parse_json_file @pytest.fixture() def fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success():", "= base_config self.name = settings_name class FakeConfiguration(object): def __init__(self, settings,", "test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the platform", "recursively call merge_kw_dict recursively \"\"\" include_settings_file = 'include_test' test_settings_single_include =", "self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" 
Test scenario: Test", "scenario: Test the merge_kw_dict when the platform + configuration is", "# For complete copyright and license terms please see the", "FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()): self.platform = platform_name self.aliases =", "additional_aliases, merge_dict, expected): fake_context = FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path',", "fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings", "only platform is set and not any configurations \"\"\" test_platform", "configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare", "{} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def", "additional_aliases, section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict =", "Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged)", "class FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()): self.platform = platform_name self.aliases", "portions of this file Copyright (c) Amazon.com, Inc. 
or its", "id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'),", "True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'},", "'test_section', {'key1': 'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases,", "def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected): if fake_include_settings:", "False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict,", "def fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib'", "} }, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json,", "assert expected == merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json =", "test_merge_kw_key = 'passed' test_merge_kw_value = True self.mock_json_map = {'path': test_settings_single_include,", "sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self):", "id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\": { \"key1\": \"value1\" } }", "= {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, 
platform=test_platform_name, configuration=test_configuration) # Validate all the", "self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings} # Prepare a mock", "all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name,", "platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test", "isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict,", "affiliates or # its licensors. # # For complete copyright", "setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map = {}", "test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed", "assert fake_include_settings == include_settings_file fake_settings = FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file", "sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the", ") lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' def", "}, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict,", "type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', 
{'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org':", "utils.parse_json_file utils.parse_json_file = _mock_parse_json yield utils.parse_json_file = old_parse_json_file @pytest.fixture() def", "Setup a project settings that contains other project settings, so", "with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map,", "sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)", "passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name,", "kw_key, source_section, additional_aliases, merge_dict, expected\", [ pytest.param('test_target', 'fake_key', {}, {},", "def __init__(self, settings_name, base_config=None): self.base_config = base_config self.name = settings_name", "settings object test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform, configuration):", "mock_json_map, additional_aliases, expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings", "notices. 
This file is distributed on an \"AS IS\" BASIS,", "= utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map = {} def tearDown(self):", "'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key =", "pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'),", "sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged)", "platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict #", "# Validate all the sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name,", "any configurations \"\"\" test_platform = 'test_platform' test_alias = 'alias_1' fake_context", "{} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all the sections", "license terms please see the LICENSE at the root of", "def __init__(self, platform_name, aliases=set()): self.platform = platform_name self.aliases = aliases", "'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'},", "test nor server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name =", "configurations \"\"\" test_platform = 'test_platform' test_alias = 'alias_1' fake_context =", "aliases=set()): self.platform = platform_name self.aliases = aliases class FakeConfigurationSettings(object): def", "= 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( 
settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False,", "set, and the configuration is a test but not a", "configuration, but is derived from another configuration \"\"\" test_platform_name =", "# its licensors. # # For complete copyright and license", "def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected): fake_context =", "list) assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] ==", "test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test the", "= _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged", "= {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all the", "test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test", "def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings == include_settings_file fake_settings = FakeIncludeSettings()", "= self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None)", "pass class FakeIncludeSettings(object): pass class FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()):", "assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], 
dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert", "_mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict =", "test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test", "platform is set and not any configurations \"\"\" test_platform =", "def tearDown(self): utils.parse_json_file = self.old_parse_json def mockParseJson(self, path, _): return", "Prepare a mock include settings object test_include_settings = self.createSimpleSettings() def", "merge_dict, expected): fake_context = FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)", "the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged)", "a test and a server configuration \"\"\" test_platform_name = 'test_platform'", "def mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map = {'path': {}} def", "self.old_parse_json = utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map = {} def", "{} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() def _mock_merge_kw_section(section,", "{}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org':", "'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), 
is_test=True, is_server=True) fake_context", "is_server=False): self.settings = settings self.is_test = is_test self.is_server = is_server", "fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings == include_settings_file fake_settings =", "= 'passed' test_merge_kw_value = True self.mock_json_map = {'path': test_settings_single_include, include_settings_file:", "= lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict)", "is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform):", "class FakeContext(object): pass class FakeIncludeSettings(object): pass class FakePlatformSettings(object): def __init__(self,", "configuration is not a test nor server configuration, but is", "source_section, additional_aliases, merge_dict, expected): fake_context = FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context,", "is set, and the configuration is a test but not", "'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\": { \"key1\": \"value1\"", "expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict = {} test_settings.merge_kw_section(section_key=section_key,", "mock context fake_context = FakeContext() def _mock_get_project_settings_file(_a, _b): return test_include_settings", "sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name),", 
"self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\"", "expected\", [ pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target',", "configuration is set, and the configuration is a test and", "and license terms please see the LICENSE at the root", "Inc. or its affiliates or # its licensors. # #", "import unittest import pytest import utils class FakeContext(object): pass class", "self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {}", "def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when only", "mockParseJson(self, path, _): return self.mock_json_map[path] def createSimpleSettings(self, fake_context = FakeContext(),", "sections_merged = set() def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section", "of this file Copyright (c) Amazon.com, Inc. 
or its affiliates", "merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged),", "merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all the sections passed to", "id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected): test_settings", "{}} def _mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file", "@pytest.fixture() def mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map = {'path': {}}", "import Errors import lumberyard_modules import unittest import pytest import utils", "to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged)", "self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario:", "kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert", "project settings that contains other project settings, so that it", "= 'alias_1' fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def", "pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target',", 
"{'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key,", "self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)", "\"\"\" include_settings_file = 'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings =", "test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the platform", "id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'},", "'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org',", "test nor server configuration, but is derived from another configuration", "= _mock_merge_kw_dict # Prepare a mock context fake_context = FakeContext()", "= dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0]", "nor server configuration, but is derived from another configuration \"\"\"", "sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test", "test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', 
merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration)", "test_empty_settings = {} test_merge_kw_key = 'passed' test_merge_kw_value = True self.mock_json_map", "provided, by the license below or the license accompanying this", "{} test_merge_kw_key = 'passed' test_merge_kw_value = True self.mock_json_map = {'path':", "merge_dict == expected elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key,", "sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the", "self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name),", "self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def", "to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def", "additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict ==", "type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings,", "configuration=None) 
self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario:", "can recursively call merge_kw_dict recursively \"\"\" include_settings_file = 'include_test' test_settings_single_include", "sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the", "'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError),", "include_settings_file fake_settings = FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test", "terms please see the LICENSE at the root of this", "or, if provided, by the license below or the license", "_mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict =", "platform=test_platform, configuration=None) # Validate all the sections passed to the", "= FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return", "license below or the license accompanying this file. 
Do not", "WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.", "dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target,", "merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self):", "to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged)", "to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name),", "self.aliases = aliases class FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None): self.base_config", "utils.parse_json_file = self.mockParseJson self.mock_json_map = {} def tearDown(self): utils.parse_json_file =", "additional_aliases, expected\", [ pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({},", "assert test.dict == expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\", [", "= FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context,", "= is_server @pytest.fixture() def mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map =", "is set, and the configuration is a server but not", "{}, {}, {'fake_key': 'fake_value'}, 
id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {},", "'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True},", "is_server @pytest.fixture() def mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map = {'path':", "base_config self.name = settings_name class FakeConfiguration(object): def __init__(self, settings, is_test=False,", "all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias),", "sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged)", "'includes': ['include_test'] },'include_test': {} }, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ])", "pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target',", "ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map", "= self.old_parse_json def mockParseJson(self, path, _): return self.mock_json_map[path] def createSimpleSettings(self,", "'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings", "= {} test_merge_kw_key = 'passed' test_merge_kw_value = True self.mock_json_map =", "base_config=None): self.base_config = base_config self.name = settings_name class FakeConfiguration(object): def", 
"self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario:", "fake_settings = FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test =", "BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either", "'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only':", "test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\",", "if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict == expected", "merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected == merge_dict", "Test the merge_kw_dict when only platform is set and not", "'path': { \"test_section\": { \"key1\": \"value1\" } } }, {},", "settings_name, base_config=None): self.base_config = base_config self.name = settings_name class FakeConfiguration(object):", "'passed' test_merge_kw_value = True self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings}", "test_dict, fake_include_settings, mock_json_map, additional_aliases, expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases):", "{ 'includes': ['include_test'] },'include_test': {} }, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes')", "the merge_kw_dict when only platform is set and not any", "is set, and the 
configuration is a test and a", "Validate all the sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name),", "{}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only': False}, {},", "test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the platform", "either express or implied. # from waflib import Errors import", "pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a mock context fake_context", "self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name,", "{'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings def", "path, _): return self.mock_json_map[path] def createSimpleSettings(self, fake_context = FakeContext(), test_dict={},", "# # All or portions of this file Copyright (c)", "the root of this # distribution (the \"License\"). 
All use", "{'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'},", "source_section, additional_aliases, merge_dict, expected\", [ pytest.param('test_target', 'fake_key', {}, {}, {},", "FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings =", "test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the platform", "def _mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict", "contains other project settings, so that it can recursively call", "{ \"test_section\": { \"key1\": \"value1\" } } }, {}, 'test_section',", "\"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name))", "but is derived from another configuration \"\"\" test_platform_name = 'test_platform'", "\"\"\" Test scenario: Setup a project settings that contains other", "'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True)", "{}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target,", "test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def 
test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self):", "self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "This file is distributed on an \"AS IS\" BASIS, #", "def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert", "assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases, merge_dict,", "{} }, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context,", "@pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\", [ pytest.param({}, None, None,", "set, and the configuration is a server but not a", "not a server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name =", "\"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration'", "test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list)", "False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'},", "settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def", "additional_aliases) return test_settings def 
test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup a", "settings, so that it can recursively call merge_kw_dict recursively \"\"\"", "the configuration is not a test nor server configuration \"\"\"", "test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context", "sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged)", "fake_context.get_project_settings_file = _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict", "2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "target='test_target', merge_kw=merge_dict) assert expected == merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self):", "= _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict ==", "= 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True,", "self.old_parse_json def mockParseJson(self, path, _): return self.mock_json_map[path] def createSimpleSettings(self, fake_context", "merge_kw, platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict", "sections_merged) 
self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name),", "sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged),", "libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] ==", "sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the", "\"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name),", "lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success():", "the merge_kw_dict when the platform + configuration is set, and", "section_key, expected\", [ pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path':", "def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the", "'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False},", "test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict == expected elif isinstance(expected,", "{}, 
id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path': { 'includes': ['include_test'] },'include_test':", "a test but not a server configuration \"\"\" test_platform_name =", "self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario:", "context fake_context = FakeContext() def _mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file", "{}, 'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\": { \"key1\":", "pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name,", "FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return", "test_dict=test_dict) sections_merged = set() def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass", "fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name)", "when only platform is set and not any configurations \"\"\"", "fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context,", "FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path',", "License, # or, if provided, by the license below or", "test_merge_kw_value) def 
test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "# Prepare a mock include settings object test_include_settings = self.createSimpleSettings()", "the license below or the license accompanying this file. Do", "kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases, merge_dict, expected\",", "type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only':", "= 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings =", "test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings =", "is_test=True, is_server=False) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform):", "class FakeConfiguration(object): def __init__(self, settings, is_test=False, is_server=False): self.settings = settings", "sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test", "_mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate", "project settings, so that it can recursively call merge_kw_dict recursively", "sections_merged) 
self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name),", "modify any license notices. This file is distributed on an", "is distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES", "False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'},", "= FakeContext() def _mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file", "configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration =", "]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected): if", ") lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' assert", "kw_key, source_section, additional_aliases, merge_dict, expected): fake_context = FakeContext() test_settings =", "+ configuration is set, and the configuration is not a", "the license accompanying this file. 
Do not # remove or", "[ pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', {", "def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict", "FakeContext(object): pass class FakeIncludeSettings(object): pass class FakePlatformSettings(object): def __init__(self, platform_name,", "self.is_server = is_server @pytest.fixture() def mock_parse_json(mock_json_map): if not mock_json_map: mock_json_map", "self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3)", "8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "the configuration is not a test nor server configuration, but", "this file. 
Do not # remove or modify any license", "configuration is set, and the configuration is not a test", "additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib'", "self.mock_json_map = {'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return", "'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform')", "and a server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name =", "{}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {},", "please see the LICENSE at the root of this #", "return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup a project", "self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name),", "test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None,", "settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def", "= 
FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings =", "configuration=None) # Validate all the sections passed to the merge_kw_dict", "self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\"", "class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file = self.mockParseJson", "license notices. This file is distributed on an \"AS IS\"", "self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self):", "sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)", "'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list)", "all the sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)", "'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases, merge_dict, expected\", [ pytest.param('test_target',", "aliases={test_alias}) def 
_mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings", "test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test", "OR CONDITIONS OF ANY KIND, either express or implied. #", "{} def tearDown(self): utils.parse_json_file = self.old_parse_json def mockParseJson(self, path, _):", "test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected): if fake_include_settings: def", "{}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {},", "not a test nor server configuration, but is derived from", "@pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases, merge_dict, expected\", [ pytest.param('test_target', 'fake_key',", "tearDown(self): utils.parse_json_file = self.old_parse_json def mockParseJson(self, path, _): return self.mock_json_map[path]", "set() def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section", "{'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only':", "assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target, kw_key,", "= {'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key = 'passed' test_merge_kw_value", "'test_platform' test_alias = 'alias_1' fake_context = FakeContext() fake_platform_settings = 
FakePlatformSettings(platform_name='test_platform',", "nor server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration'", "file Copyright (c) Amazon.com, Inc. or its affiliates or #", "the LICENSE at the root of this # distribution (the", "a server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration'", "= 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext()", "merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name),", "(c) Amazon.com, Inc. or its affiliates or # its licensors.", "class FakeIncludeSettings(object): pass class FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()): self.platform", "{}, {}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {},", "test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict == expected @pytest.mark.parametrize(", "platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to the", "accompanying this file. Do not # remove or modify any", "license accompanying this file. 
Do not # remove or modify", "_mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate", "@pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\", [ pytest.param(None, {}, 'no_section', {},", "so that it can recursively call merge_kw_dict recursively \"\"\" include_settings_file", "recursively \"\"\" include_settings_file = 'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings", "FakeConfiguration(object): def __init__(self, settings, is_test=False, is_server=False): self.settings = settings self.is_test", "its licensors. # # For complete copyright and license terms", "= settings self.is_test = is_test self.is_server = is_server @pytest.fixture() def", "merge_kw_dict when the platform + configuration is set, and the", "that contains other project settings, so that it can recursively", "the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name),", "is_test=False, is_server=False): self.settings = settings self.is_test = is_test self.is_server =", "{} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections", "def setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map =", "= _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) #", "FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = 
FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform')", "{'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org':", "sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario:", "is set, and the configuration is not a test nor", "sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw,", "additional_aliases, expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings ==", "assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw", "= settings_name class FakeConfiguration(object): def __init__(self, settings, is_test=False, is_server=False): self.settings", "base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform,", "merge_kw[test_merge_kw_key] = test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a", "self.mock_json_map = {} def tearDown(self): utils.parse_json_file = self.old_parse_json def mockParseJson(self,", "settings, is_test=False, is_server=False): self.settings = settings self.is_test = is_test self.is_server", "include_settings_file = 'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings = {}", 
"id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'),", "sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11)", "kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib')", "= _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target',", "self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key] =", "= self.mockParseJson self.mock_json_map = {} def tearDown(self): utils.parse_json_file = self.old_parse_json", "not mock_json_map: mock_json_map = {'path': {}} def _mock_parse_json(path, _): return", "= lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict == expected @pytest.mark.parametrize( \"mock_json_map,", "= aliases class FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None): self.base_config =", "utils.parse_json_file = _mock_parse_json yield utils.parse_json_file = old_parse_json_file @pytest.fixture() def fake_context():", "set, and the configuration is a test and a server", "\"mock_json_map, additional_aliases, section_key, expected\", [ 
pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'),", "isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list) assert", "isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw =", "= FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = FakeContext() fake_platform_settings =", "{'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org':", "import pytest import utils class FakeContext(object): pass class FakeIncludeSettings(object): pass", "expected\", [ pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path': {", "{}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'},", "settings_name class FakeConfiguration(object): def __init__(self, settings, is_test=False, is_server=False): self.settings =", "_mock_parse_json yield utils.parse_json_file = old_parse_json_file @pytest.fixture() def fake_context(): return FakeContext()", "is a test but not a server configuration \"\"\" test_platform_name", "def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the", "FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert", "express or implied. 
# from waflib import Errors import lumberyard_modules", "mock_json_map, additional_aliases, section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict", "test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self):", "set and not any configurations \"\"\" test_platform = 'test_platform' test_alias", "'path': { 'includes': ['include_test'] },'include_test': {} }, {}, {'includes': ['include_test']},", "= platform_name self.aliases = aliases class FakeConfigurationSettings(object): def __init__(self, settings_name,", "pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path':", "\"value1\" } } }, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges') ])", "{'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'}, {},", "IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,", "self.platform = platform_name self.aliases = aliases class FakeConfigurationSettings(object): def __init__(self,", "[ pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key',", "= 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext()", "_mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert test.dict == expected", "self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) 
self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)", "= set() def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section =", "is governed by the License, # or, if provided, by", "mock_json_map: mock_json_map = {'path': {}} def _mock_parse_json(path, _): return mock_json_map[path]", "test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set()", "configuration is set, and the configuration is a server but", "'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext()", "{}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'},", "ANY KIND, either express or implied. 
# from waflib import", "lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert", "== include_settings_file fake_settings = FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file", "[ pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\":", "WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or", "id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected):", "platform_name, aliases=set()): self.platform = platform_name self.aliases = aliases class FakeConfigurationSettings(object):", "'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only',", "= FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform):", "passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated',", "test but not a server configuration \"\"\" test_platform_name = 'test_platform'", "if provided, by the license below or the license accompanying", "file. 
Do not # remove or modify any license notices.", "test_platform = 'test_platform' test_alias = 'alias_1' fake_context = FakeContext() fake_platform_settings", "merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\"", "any license notices. This file is distributed on an \"AS", "additional_aliases={}): self.mock_json_map = {'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)", "self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key,", "base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test", "mock_json_map = {'path': {}} def _mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file", "self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged)", "root of this # distribution (the \"License\"). 
All use of", "{}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings,", "test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all the sections passed", "def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the", "implied. # from waflib import Errors import lumberyard_modules import unittest", "on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS", "merge_kw=merge_dict) assert merge_dict == expected elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError):", "FakeContext() def _mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings", "} } }, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges') ]) def", "= dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert", "self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)", "self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name),", "mock_json_map, additional_aliases, expected\", [ 
pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'),", "merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\", [ pytest.param({}, None,", "= _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) #", "# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express", "self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def", "= True self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings} # Prepare", "# or, if provided, by the license below or the", "the License, # or, if provided, by the license below", "_mock_get_platform_settings test_dict = {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged =", "utils.parse_json_file = self.old_parse_json def mockParseJson(self, path, _): return self.mock_json_map[path] def", "__init__(self, settings_name, base_config=None): self.base_config = base_config self.name = settings_name class", "test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context =", "not any configurations \"\"\" test_platform = 'test_platform' test_alias = 'alias_1'", "id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org': False}, {}, {'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'),", "= FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def 
_mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings", "'alias_1' fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform):", "= {} test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() def", "a test configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration'", "call merge_kw_dict recursively \"\"\" include_settings_file = 'include_test' test_settings_single_include = {'includes':", "self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name,", "# distribution (the \"License\"). 
All use of this software is", "expected == merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json = utils.parse_json_file", "'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target',", "self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test", "= 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context =", "self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario:", "== expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\", [ pytest.param(None, {},", "test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True,", "def __init__(self, settings, is_test=False, is_server=False): self.settings = settings self.is_test =", "list) assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict(", "= is_test self.is_server = is_server @pytest.fixture() def mock_parse_json(mock_json_map): if not", "_mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context,", 
"remove or modify any license notices. This file is distributed", "{'copyright_org': 'AMZN'}, type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org':", "fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform): self.assertEqual(platform,", "configuration is a test and a server configuration \"\"\" test_platform_name", "= 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True)", "(the \"License\"). All use of this software is governed by", "sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name),", "{'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True},", "{}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path': { 'includes': ['include_test']", "the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged)", "test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'],", "sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) 
self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name),", "assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib'", "and the configuration is not a test nor server configuration", "test_platform_name) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings", "dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] ==", "# # For complete copyright and license terms please see", "def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the", "11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {}", "'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = FakeContext()", "from another configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration'", "fake_context, mock_json_map, additional_aliases, section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases)", "configuration is a test but not a server configuration \"\"\"", "= self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value", "KIND, either express or implied. 
# from waflib import Errors", "distribution (the \"License\"). All use of this software is governed", "__init__(self, platform_name, aliases=set()): self.platform = platform_name self.aliases = aliases class", "source_section=source_section, merge_kw=merge_dict) assert merge_dict == expected elif isinstance(expected, type(Errors.WafError)): with", "FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map = {'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context,", "test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def", "test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged),", "copyright and license terms please see the LICENSE at the", "test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings =", "merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all the sections passed to", "== 'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases, merge_dict, expected\", [", "utils class FakeContext(object): pass class FakeIncludeSettings(object): pass class FakePlatformSettings(object): def", "additional_aliases, merge_dict, expected\", [ pytest.param('test_target', 'fake_key', {}, {}, {}, {},", "\"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(", "test_settings_single_include, include_settings_file: 
test_empty_settings} # Prepare a mock include settings object", "utils.parse_json_file utils.parse_json_file = self.mockParseJson self.mock_json_map = {} def tearDown(self): utils.parse_json_file", "FakeIncludeSettings(object): pass class FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()): self.platform =", "= FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map = {'path': test_dict} test_settings =", "and not any configurations \"\"\" test_platform = 'test_platform' test_alias =", "return self.mock_json_map[path] def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map", "None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path': { 'includes':", "not a test nor server configuration \"\"\" test_platform_name = 'test_platform'", "to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2)", "fake_context = FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict):", "test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings", "additional_aliases, section_key, expected\", [ pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'), pytest.param({", "= FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings =", "fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) 
test_merge_kw = {}", "_mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file = _mock_parse_json", "pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key':", "assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict)", "def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup a project settings that", "the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 2) def", "the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name),", "Errors import lumberyard_modules import unittest import pytest import utils class", "LICENSE at the root of this # distribution (the \"License\").", "# Prepare a mock context fake_context = FakeContext() def _mock_get_project_settings_file(_a,", "\"test_section\": { \"key1\": \"value1\" } } }, {}, 'test_section', {'key1':", "sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test", "False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False},", "lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) 
merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert", "the configuration is a test but not a server configuration", "def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict", "aliases class FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None): self.base_config = base_config", "= {'path': {}} def _mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file =", "def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'],", "old_parse_json_file = utils.parse_json_file utils.parse_json_file = _mock_parse_json yield utils.parse_json_file = old_parse_json_file", "@pytest.fixture() def fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict(", "{}, id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\": { \"key1\": \"value1\" }", "= 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration =", "'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings", "3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'],", "'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), 
pytest.param('test_target', 'copyright_org', {'copyright_org': False},", "test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform, configuration=None) # Validate all", "test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test',", "merge_kw_dict when only platform is set and not any configurations", "another configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name", "{ \"key1\": \"value1\" } } }, {}, 'test_section', {'key1': 'value1'},", "or its affiliates or # its licensors. # # For", "test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() def _mock_merge_kw_section(section, target,", "test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected == merge_dict class ProjectSettingsTest(unittest.TestCase): def", "test_merge_kw_value pass test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a mock context", "return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw", "settings self.is_test = is_test self.is_server = is_server @pytest.fixture() def mock_parse_json(mock_json_map):", "__init__(self, settings, is_test=False, is_server=False): self.settings = settings self.is_test = is_test", "== expected elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section,", "= 
lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict)", "the configuration is a test and a server configuration \"\"\"", "configuration is set, and the configuration is a test but", "assert merge_dict == expected elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target,", "pass class FakePlatformSettings(object): def __init__(self, platform_name, aliases=set()): self.platform = platform_name", "merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\"", "pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases,", "and the configuration is a test and a server configuration", "that it can recursively call merge_kw_dict recursively \"\"\" include_settings_file =", "test configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration", "test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test',", "list) assert kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0],", "= {'path': test_settings_single_include, include_settings_file: test_empty_settings} # 
Prepare a mock include", "platform + configuration is set, and the configuration is a", "= utils.parse_json_file utils.parse_json_file = _mock_parse_json yield utils.parse_json_file = old_parse_json_file @pytest.fixture()", "fake_include_settings, mock_json_map, additional_aliases, expected\", [ pytest.param({}, None, None, {}, {},", "waflib import Errors import lumberyard_modules import unittest import pytest import", "_mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict =", "and the configuration is not a test nor server configuration,", "= FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform')", "section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict = {}", "{'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only',", "= lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test", "is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform,", "False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json,", "_b): return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file 
test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include)", "= 'test_platform' test_alias = 'alias_1' fake_context = FakeContext() fake_platform_settings =", "'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ])", "additional_aliases) merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected ==", "include settings object test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform,", "test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when only platform", "self.assertIn('{}/*/test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name,", "yield utils.parse_json_file = old_parse_json_file @pytest.fixture() def fake_context(): return FakeContext() def", "def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw", "isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section,", "source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\", [ pytest.param({},", "or # its licensors. 
# # For complete copyright and", "is a server but not a test configuration \"\"\" test_platform_name", "kw['libpath'][0] == 'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict) assert", "if not mock_json_map: mock_json_map = {'path': {}} def _mock_parse_json(path, _):", "test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context = FakeContext() fake_platform_settings", "test_alias = 'alias_1' fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias})", "id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': True}, {'client_only': False},", "elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize(", "libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list) assert kw['libpath'][0] == 'mylib'", "'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only',", "is not a test nor server configuration \"\"\" test_platform_name =", "old_parse_json_file @pytest.fixture() def fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw =", "isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict == expected elif", "_mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw,", 
"not # remove or modify any license notices. This file", "createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map = {'path': test_dict}", "self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() def _mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section)", "'path', additional_aliases) return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup", "additional_aliases) assert test.dict == expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\",", "test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name,", "expected\", [ pytest.param({}, None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test',", "of this # distribution (the \"License\"). All use of this", "see the LICENSE at the root of this # distribution", "sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def", "below or the license accompanying this file. 
Do not #", "FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform')", "# All or portions of this file Copyright (c) Amazon.com,", "test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section,", "test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target',", "{'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org', {'copyright_org':", "= {} def tearDown(self): utils.parse_json_file = self.old_parse_json def mockParseJson(self, path,", "def mockParseJson(self, path, _): return self.mock_json_map[path] def createSimpleSettings(self, fake_context =", "{'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases,", "id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases, expected):", "merge_kw_dict recursively \"\"\" include_settings_file = 'include_test' test_settings_single_include = {'includes': [include_settings_file]}", "is a test and a server configuration \"\"\" test_platform_name =", "not a test configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name =", "test_include_settings.merge_kw_dict = _mock_merge_kw_dict # Prepare a mock context fake_context =", "test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', 
sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated',", "additional_aliases): assert fake_include_settings == include_settings_file fake_settings = FakeIncludeSettings() return fake_settings", "the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name),", "True}, {'client_only': False}, id='MergeBoolReplaceKwSuccess'), ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section,", "FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings", "Amazon.com, Inc. or its affiliates or # its licensors. 
#", "fake_include_settings == include_settings_file fake_settings = FakeIncludeSettings() return fake_settings fake_context.get_project_settings_file =", "fake_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) assert", "id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path': { 'includes': ['include_test'] },'include_test': {}", "self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name,", "= {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value)", "is set and not any configurations \"\"\" test_platform = 'test_platform'", "fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform) return fake_platform_settings", "2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when", "def _mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file =", "lumberyard_modules import unittest import pytest import utils class FakeContext(object): pass", "# Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name),", "self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) 
self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name,", "= FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target,", "{'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org':", "test.dict == expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\", [ pytest.param(None,", "an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF", "expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings == include_settings_file", "by the license below or the license accompanying this file.", "{'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map,", "server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration", "server but not a test configuration \"\"\" test_platform_name = 'test_platform'", "{}, {}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {},", "self.base_config = base_config self.name = settings_name class FakeConfiguration(object): def __init__(self,", "class FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None): self.base_config = base_config self.name", "passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), 
sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test',", "self.is_test = is_test self.is_server = is_server @pytest.fixture() def mock_parse_json(mock_json_map): if", "kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) assert merge_dict == expected elif isinstance(expected, type(Errors.WafError)):", "test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected): fake_context = FakeContext()", "Copyright (c) Amazon.com, Inc. or its affiliates or # its", "self.name = settings_name class FakeConfiguration(object): def __init__(self, settings, is_test=False, is_server=False):", "+ configuration is set, and the configuration is a test", "'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw)", "settings that contains other project settings, so that it can", "isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0]", "passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2)", "]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, target, kw_key, source_section, additional_aliases, merge_dict, expected): fake_context", "},'include_test': {} }, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json,", "'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration(", 
"# from waflib import Errors import lumberyard_modules import unittest import", "test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected): test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path',", "id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'),", "sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 11) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self):", "pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target',", "expected elif isinstance(expected, type(Errors.WafError)): with pytest.raises(Errors.WafError): test_settings.merge_kw_key(target=target, kw_key=kw_key, source_section=source_section, merge_kw=merge_dict)", "merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target',", "'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context", "but not a server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name", "'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError), id='InvalidStringKwInTargetError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'},", 
"isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'], list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize(", "True self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings} # Prepare a", "merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged)", "}, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context,", "list) assert kw['additional_settings'][0]['stlibpath'][0] == 'mystlib' @pytest.mark.parametrize( \"target, kw_key, source_section, additional_aliases,", "include_settings_file: test_empty_settings} # Prepare a mock include settings object test_include_settings", "mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file = _mock_parse_json yield utils.parse_json_file =", "None, None, {}, {}, id='BasicNoAdditionalAliasNoAdditionalIncludes'), pytest.param({}, 'include_test', { 'path': {", "this # distribution (the \"License\"). 
All use of this software", "'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only': False},", "\"\"\" test_platform = 'test_platform' test_alias = 'alias_1' fake_context = FakeContext()", "== merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file", "derived from another configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name =", "pytest.param({ 'path': { \"test_section\": { \"key1\": \"value1\" } } },", "the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged) self.assertIn('{}/*'.format(test_alias), sections_merged) self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self):", "merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json = utils.parse_json_file utils.parse_json_file =", "passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged),", "'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key': 'fake_value'}, id='MissingKeyInTargetKeyAdded'), pytest.param('test_target', 'copyright_org',", "sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged)", "its affiliates or # its licensors. 
# # For complete", "test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context = FakeContext() fake_platform_settings", "self.settings = settings self.is_test = is_test self.is_server = is_server @pytest.fixture()", "self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "expected @pytest.mark.parametrize( \"mock_json_map, additional_aliases, section_key, expected\", [ pytest.param(None, {}, 'no_section',", "'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError),", "= 'include_test' test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key", "{ 'path': { 'includes': ['include_test'] },'include_test': {} }, {}, {'includes':", "= 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False) fake_context =", "is not a test nor server configuration, but is derived", "return mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file = _mock_parse_json yield utils.parse_json_file", "mock include settings object test_include_settings = self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw,", "All or portions of this file Copyright (c) Amazon.com, Inc.", "]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected): test_settings =", "merge_kw=merge_dict) assert expected == merge_dict class ProjectSettingsTest(unittest.TestCase): def setUp(self): self.old_parse_json", "a test nor server configuration, but is derived from another", 
"\"target, kw_key, source_section, additional_aliases, merge_dict, expected\", [ pytest.param('test_target', 'fake_key', {},", "it can recursively call merge_kw_dict recursively \"\"\" include_settings_file = 'include_test'", "return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings =", "id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'),", "sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8)", "CONDITIONS OF ANY KIND, either express or implied. # from", "def _mock_get_project_settings_file(_a, _b): return test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings =", "from waflib import Errors import lumberyard_modules import unittest import pytest", "configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name =", "is derived from another configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name", "fake_context(): return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' )", "{} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected == merge_dict class ProjectSettingsTest(unittest.TestCase):", "FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform_name) return fake_platform_settings", "unittest import pytest import utils class FakeContext(object): pass 
class FakeIncludeSettings(object):", "\"\"\" Test scenario: Test the merge_kw_dict when only platform is", "test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform_name, configuration=test_configuration) # Validate all", "= 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name))))", "configuration is not a test nor server configuration \"\"\" test_platform_name", "Test scenario: Test the merge_kw_dict when only platform is set", "this file Copyright (c) Amazon.com, Inc. or its affiliates or", "= FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform')", "== 'mylib' assert isinstance(kw['additional_settings'], list) assert isinstance(kw['additional_settings'][0], dict) assert isinstance(kw['additional_settings'][0]['stlibpath'],", "by the License, # or, if provided, by the license", "a test nor server configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name", "{'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key = 'passed' test_merge_kw_value =", "or modify any license notices. 
This file is distributed on", "{'key1': 'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key,", "the configuration is a server but not a test configuration", "import utils class FakeContext(object): pass class FakeIncludeSettings(object): pass class FakePlatformSettings(object):", "\"\"\" Test scenario: Test the merge_kw_dict when the platform +", "test_merge_kw_value = True self.mock_json_map = {'path': test_settings_single_include, include_settings_file: test_empty_settings} #", "OF ANY KIND, either express or implied. # from waflib", "_mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings == include_settings_file fake_settings = FakeIncludeSettings() return", "fake_include_settings, mock_json_map, additional_aliases, expected): if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert", "other project settings, so that it can recursively call merge_kw_dict", "= 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) fake_context =", "test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name),", "self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key], test_merge_kw_value) def test_ProjectSettingsFileMergeKwDict_MergePlatformSection_Success(self): \"\"\" Test scenario: Test", "self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated', sections_merged) self.assertIn('{}/*/dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated'.format(test_platform_name, 
test_configuration_name), sections_merged)", "the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name,", "'include_test', { 'path': { 'includes': ['include_test'] },'include_test': {} }, {},", "FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if isinstance(expected,dict): test_settings.merge_kw_key(target=target, kw_key=kw_key,", "a project settings that contains other project settings, so that", "is_test self.is_server = is_server @pytest.fixture() def mock_parse_json(mock_json_map): if not mock_json_map:", "scenario: Setup a project settings that contains other project settings,", "'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'), pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only':", "= {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected == merge_dict class", "platform_name self.aliases = aliases class FakeConfigurationSettings(object): def __init__(self, settings_name, base_config=None):", "Test scenario: Test the merge_kw_dict when the platform + configuration", "\"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\", [ pytest.param({}, None, None, {},", "Prepare a mock context fake_context = FakeContext() def _mock_get_project_settings_file(_a, _b):", "Test the merge_kw_dict when the platform + configuration is set,", "merge_dict, expected\", [ pytest.param('test_target', 'fake_key', {}, {}, {}, {}, id='MissingKeyInSourceNoChange'),", "'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=False, is_server=True) 
fake_context = FakeContext()", "governed by the License, # or, if provided, by the", "pytest.param('test_target', 'client_only', {'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target',", "merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test', sections_merged) self.assertIn('{}/*/test'.format(test_platform_name), sections_merged)", "use of this software is governed by the License, #", "settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform):", "kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') ) lumberyard_modules.sanitize_kw_input(kw) assert isinstance(kw['libpath'], list)", "sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform_name), sections_merged) self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged)", "server configuration, but is derived from another configuration \"\"\" test_platform_name", "pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=test_platform,", "self.createSimpleSettings() def _mock_merge_kw_dict(target, merge_kw, platform, configuration): merge_kw[test_merge_kw_key] = test_merge_kw_value pass", "test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the platform", "a server but not a test configuration \"\"\" test_platform_name =", "or implied. 
# from waflib import Errors import lumberyard_modules import", "self.assertEqual(len(sections_merged), 2) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestNoDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict", "sections_merged) self.assertIn('{}/{}'.format(test_platform_name, base_test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test", "'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=False)", "\"key1\": \"value1\" } } }, {}, 'test_section', {'key1': 'value1'}, id='SimpleChanges')", "self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged) self.assertEqual(len(sections_merged), 8) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestNoDedicatedSection_Success(self): \"\"\"", "lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings def test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario:", "{}, {}, id='MissingKeyInSourceNoChange'), pytest.param('test_target', 'fake_key', {'fake_key': 'fake_value'}, {}, {}, {'fake_key':", "test_empty_settings} # Prepare a mock include settings object test_include_settings =", "For complete copyright and license terms please see the LICENSE", "sections passed to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged)", "type(Errors.WafError), id='InvalidBoolKwInSourceError'), 
pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError),", "pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': 'A2Z'}, {'copyright_org': 'AMZN'}, id='MergeStringReplaceSuccess'),", "if fake_include_settings: def _mock_get_project_settings_file(include_settings_file, additional_aliases): assert fake_include_settings == include_settings_file fake_settings", "{}, 'test_section', {'key1': 'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map,", "{'path': {}} def _mock_parse_json(path, _): return mock_json_map[path] old_parse_json_file = utils.parse_json_file", "pytest import utils class FakeContext(object): pass class FakeIncludeSettings(object): pass class", "type(Errors.WafError), id='InvalidStringKwInSourceError'), pytest.param('test_target', 'copyright_org', {'copyright_org': 'AMZN'}, {}, {'copyright_org': False}, type(Errors.WafError),", "test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' base_test_configuration_name = 'base_configuration' test_configuration", "= {'path': test_dict} test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) return test_settings", "is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform):", "def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedNoTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the merge_kw_dict when the", "\"License\"). 
All use of this software is governed by the", "_): return mock_json_map[path] old_parse_json_file = utils.parse_json_file utils.parse_json_file = _mock_parse_json yield", "kw_key=kw_key, source_section=source_section, merge_kw=merge_dict) @pytest.mark.parametrize( \"test_dict, fake_include_settings, mock_json_map, additional_aliases, expected\", [", "_): return self.mock_json_map[path] def createSimpleSettings(self, fake_context = FakeContext(), test_dict={}, additional_aliases={}):", "_mock_merge_kw_section(section, target, merge_kw): sections_merged.add(section) pass test_settings.merge_kw_section = _mock_merge_kw_section test_merge_kw =", "distributed on an \"AS IS\" BASIS, # WITHOUT WARRANTIES OR", "'value1'}, id='SimpleChanges') ]) def test_ProjectSettingsFileMergeKwSection_ValidInputs_Success(mock_parse_json, fake_context, mock_json_map, additional_aliases, section_key, expected):", "{'path': test_settings_single_include, include_settings_file: test_empty_settings} # Prepare a mock include settings", "test_include_settings fake_context.get_project_settings_file = _mock_get_project_settings_file test_settings = self.createSimpleSettings(fake_context=fake_context, test_dict=test_settings_single_include) test_merge_kw =", "expected): fake_context = FakeContext() test_settings = lumberyard_modules.ProjectSettingsFile(fake_context, 'path', additional_aliases) if", "self.mockParseJson self.mock_json_map = {} def tearDown(self): utils.parse_json_file = self.old_parse_json def", "Test scenario: Setup a project settings that contains other project", "set, and the configuration is not a test nor server", "= FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform', aliases={test_alias}) def _mock_get_platform_settings(platform): self.assertEqual(platform, test_platform)", "== 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib', additional_settings=dict(stlibpath='mystlib') )", 
"Validate all the sections passed to the merge_kw_dict self.assertIn('{}/*'.format(test_platform), sections_merged)", "test_ProjectSettingsFileMergeKwDict_RecursiveMergeAdditionalSettingsNoPlatformNoConfiguration_Success(self): \"\"\" Test scenario: Setup a project settings that contains", "complete copyright and license terms please see the LICENSE at", "{'client_only': 'False'}, {}, {'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only':", "but not a test configuration \"\"\" test_platform_name = 'test_platform' test_configuration_name", "file is distributed on an \"AS IS\" BASIS, # WITHOUT", "test_dict=test_settings_single_include) test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw)", "\"AS IS\" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY", "test_settings_single_include = {'includes': [include_settings_file]} test_empty_settings = {} test_merge_kw_key = 'passed'", "[include_settings_file]} test_empty_settings = {} test_merge_kw_key = 'passed' test_merge_kw_value = True", "of this software is governed by the License, # or,", "a mock context fake_context = FakeContext() def _mock_get_project_settings_file(_a, _b): return", "FakeConfiguration( settings=FakeConfigurationSettings(settings_name=test_configuration_name, base_config=FakeConfiguration(FakeConfigurationSettings(settings_name=base_test_configuration_name)))) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def", "FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def _mock_get_platform_settings(platform): self.assertEqual(platform,", "self.assertIn('{}/{}/dedicated'.format(test_platform_name, 
test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged)", "return FakeContext() def test_SanitizeKWInput_SimpleKwDictionary_Success(): kw = dict( libpath='mylib' ) lumberyard_modules.sanitize_kw_input(kw)", "test_platform) return fake_platform_settings fake_context.get_platform_settings = _mock_get_platform_settings test_dict = {} test_settings", "FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name), is_test=True, is_server=True) fake_context = FakeContext() fake_platform_settings = FakePlatformSettings(platform_name='test_platform') def", "platform + configuration is set, and the configuration is not", "test_merge_kw = {} test_settings.merge_kw_dict(target='test_target', merge_kw=test_merge_kw, platform=None, configuration=None) self.assertIn(test_merge_kw_key, test_merge_kw) self.assertEqual(test_merge_kw[test_merge_kw_key],", "at the root of this # distribution (the \"License\"). 
All", "the platform + configuration is set, and the configuration is", "sections_merged) self.assertEqual(len(sections_merged), 3) def test_ProjectSettingsFileMergeKwDict_MergePlatformConfigurationNoDerivedTestDedicatedSection_Success(self): \"\"\" Test scenario: Test the", "['include_test'] },'include_test': {} }, {}, {'includes': ['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def", "self.assertIn('{}/{}/dedicated,test'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/test,dedicated', sections_merged) self.assertIn('{}/*/test,dedicated'.format(test_platform_name), sections_merged) self.assertIn('{}/{}/test,dedicated'.format(test_platform_name, test_configuration_name), sections_merged)", "and the configuration is a server but not a test", "pytest.param(None, {}, 'no_section', {}, id='SimpleNoChange'), pytest.param({ 'path': { \"test_section\": {", "configuration is a server but not a test configuration \"\"\"", "software is governed by the License, # or, if provided,", "or portions of this file Copyright (c) Amazon.com, Inc. or", "target, kw_key, source_section, additional_aliases, merge_dict, expected): fake_context = FakeContext() test_settings", "= self.createSimpleSettings(fake_context=fake_context, test_dict=test_dict) sections_merged = set() def _mock_merge_kw_section(section, target, merge_kw):", "fake_context = FakeContext(), test_dict={}, additional_aliases={}): self.mock_json_map = {'path': test_dict} test_settings", "or the license accompanying this file. 
Do not # remove", "{'client_only': False}, {}, {'client_only': 'True'}, type(Errors.WafError), id='InvalidBoolKwInTargetError'), pytest.param('test_target', 'client_only', {'client_only':", "pytest.param({}, 'include_test', { 'path': { 'includes': ['include_test'] },'include_test': {} },", "['include_test']}, id='BasicNoAdditionalAliasSingleAdditionalIncludes') ]) def test_ProjectSettingsFileMergeKwKey_ValidInputs(mock_parse_json, fake_context, test_dict, fake_include_settings, mock_json_map, additional_aliases,", "= _mock_parse_json yield utils.parse_json_file = old_parse_json_file @pytest.fixture() def fake_context(): return", "'path', additional_aliases) merge_dict = {} test_settings.merge_kw_section(section_key=section_key, target='test_target', merge_kw=merge_dict) assert expected", "to the merge_kw_dict self.assertIn('{}/{}'.format(test_platform_name, test_configuration_name), sections_merged) self.assertIn('*/*/dedicated,test', sections_merged) self.assertIn('{}/*/dedicated,test'.format(test_platform_name), sections_merged)", "{'client_only': True}, type(Errors.WafError), id='InvalidBoolKwInSourceError'), pytest.param('test_target', 'client_only', {'client_only': False}, {}, {'client_only':", "this software is governed by the License, # or, if", "test_platform_name = 'test_platform' test_configuration_name = 'test_configuration' test_configuration = FakeConfiguration(settings=FakeConfigurationSettings(settings_name=test_configuration_name)) fake_context", "assert kw['libpath'][0] == 'mylib' def test_SanitizeKWInput_SimpleKwDictionaryInAdditionalSettings_Success(): kw = dict( libpath='mylib',", "All use of this software is governed by the License," ]
[ "absolute error variables to variable list for point_cnt in range(len(points)):", "error definition.\"\"\" objective = solver.Objective() for variable in variables[num_of_coeff:]: objective.SetCoefficient(variable,", "optimal linear polynomial. Uses 5 points from Swanson's curve fitting", "var_to_val = dict() for coeff in variables[:num_of_coeff]: var_to_val[coeff.name()] = coeff.solution_value()", "coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else: upper_bound = coeff_ranges[coeff_num][1]", "1] constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint) return constraints def get_optimal_polynomial(", "def _generate_variables(solver, points, coeff_ranges, err_max, error_def): \"\"\"Create coefficient variables. Initial", "out naming scheme for arbitrary number of variables. \"\"\" num_of_coeff", "variable list. if num_of_coeff == 2: coeff_names.append('m') coeff_names.append('b') else: for", "ther values. \"\"\" if coeff_ranges is None: raise ValueError('Please provide", "(0,1), (1,3), (2,2), (3,4), (4,5) coeff_ranges = ((None, None), (None,", "alphabet used for coefficient names. TODO(drofp): Figure out naming scheme", "\"\"\" if coeff_ranges is None: raise ValueError('Please provide appropriate coefficient", "# print('Optimized m: {}, b: {}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING", "string from ortools.linear_solver import pywraplp class ErrorDefinition(enum.Enum): SUM_ABS_DEV = enum.auto()", "(min, max). Nubmer of elements in list determines order of", "to find the optimal coefficients for a given polynomial. Overview:", "Dictionary, the desired coefficients mapped to ther values. 
\"\"\" if", "= variables[num_of_coeff + 2 * point_num] ex_minus = variables[num_of_coeff +", "coeff_ranges = ((None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points():", "Sum of errors Subject to: Bounds on coefficients Credit: \"Curve", "* point_num + 1] constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint) return", "maximum error allowable. solver: a ortools.pywraplp.Solver object, if a specific", "for coeff_num in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None: lower_bound =", "len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver, points, num_of_coeff, variables) solver.Solve() var_to_val", "[] # Add coefficients to variable list. if num_of_coeff ==", "sum of error for each fit point to find the", "coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): \"\"\"Optimize coefficients for any order polynomial.", "str(point_cnt + 1) + '_plus') negative_err_var = solver.NumVar( 0, err_max,", "coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points(): print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM", "on coefficients Credit: \"Curve Fitting with Linear Programming\", <NAME> and", "paper. \"\"\" print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON", "points, coeff_ranges, err_max, error_def): \"\"\"Create coefficient variables. Initial version works", "[1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]", "def demo_optimal_linear_10points(): print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS')", "- coeff_num - 1 x_val = point[0] ** power constraint.SetCoefficient(coeff,", "m: {}, b: {}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING LINEAR DEMO", "err_max, 'e' + str(point_cnt + 1) + '_plus') negative_err_var =", "err_max: An Integer, specifying the maximum error allowable. 
solver: a", "'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables = _generate_variables( solver, points, coeff_ranges, err_max=err_max, error_def=error_def)", "appropriate coefficient range.') if solver is None: solver = pywraplp.Solver(", "for coeff_num, coeff in enumerate(variables[:num_of_coeff]): power = num_of_coeff - coeff_num", "to variable list for point_cnt in range(len(points)): positive_err_var = solver.NumVar(", "polynomial. Overview: Objective: Sum of errors Subject to: Bounds on", "enum import string from ortools.linear_solver import pywraplp class ErrorDefinition(enum.Enum): SUM_ABS_DEV", "the definition for error. err_max: An Integer, specifying the maximum", "num_of_coeff, variables): constraints = [] for point_num, point in enumerate(points):", "2 * point_num + 1] constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint)", "coeff_num in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None: lower_bound = -solver.Infinity()", "x_val) # Error terms ex_plus = variables[num_of_coeff + 2 *", "tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None), (None, None),", "is None: solver = pywraplp.Solver( 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables = _generate_variables(", "coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num])) # Add absolute error variables", "4.6, 6.0, 6.8, 7.3]) points = tuple(zip(x_vals, y_vals)) coeff_ranges =", "_generate_variables( solver, points, coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver,", "[] coeff_names = [] # Add coefficients to variable list.", "terms ex_plus = variables[num_of_coeff + 2 * point_num] ex_minus =", "range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None:", "points from Swanson's 
curve fitting paper. \"\"\" print('STARTING LINEAR DEMO", "solver = pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points,", "instance is requested by caller. Returns: A Dictionary, the desired", "err_max, error_def): \"\"\"Create coefficient variables. Initial version works for up", "linear polynomial. Uses 5 points from Swanson's curve fitting paper.", "2.0, 2.7, 3.5] y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0,", "num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver, points, num_of_coeff, variables)", "constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint) return constraints def get_optimal_polynomial( points=None,", "demo_optimal_quadratic_19points(): print('STARTING QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS') x_vals", "class ErrorDefinition(enum.Enum): SUM_ABS_DEV = enum.auto() SUM_MAX_DEVIATION = enum.auto() def _generate_variables(solver,", "(None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC", "= tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None)) print(get_optimal_polynomial(points=points,", "variable polynomial. One letter per english alphabet used for coefficient", "from __future__ import absolute_import from __future__ import division from __future__", "determines order of polynomial, from highest order (0th index) to", "of error for each fit point to find the optimal", "negative_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1)", "python3 \"\"\"Curve fitting with linear programming. Minimizes the sum of", "from __future__ import division from __future__ import print_function import enum", "order polynomial. 
Args: points: A tuple of points, represented as", "(4,5) coeff_ranges = ((None, None), (None, None)) # solver =", "\"\"\"Generate objective function for given error definition.\"\"\" objective = solver.Objective()", "= enum.auto() SUM_MAX_DEVIATION = enum.auto() def _generate_variables(solver, points, coeff_ranges, err_max,", "= solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) +", "variables.append(negative_err_var) return variables def _generate_objective_fn( solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV): \"\"\"Generate", "point to find the optimal coefficients for a given polynomial.", "in list determines order of polynomial, from highest order (0th", "y_vals)) coeff_ranges = ((None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points,", "the sum of error for each fit point to find", "WITH 5 POINTS FROM SWANSON PAPER') points = (0,1), (1,3),", "\"\"\" num_of_coeff = len(coeff_ranges) variables = [] coeff_names = []", "return var_to_val def demo_optimal_linear_5points(): \"\"\"Demonstration of getting optimal linear polynomial.", "= coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else:", "1 x_val = point[0] ** power constraint.SetCoefficient(coeff, x_val) # Error", "variable in variables[num_of_coeff:]: objective.SetCoefficient(variable, 1) return objective def _generate_constraints(solver, points,", "for up to 26 variable polynomial. 
One letter per english", "DEMO WITH 19 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5,", "1) constraints.append(constraint) return constraints def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000,", "solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_minus')", "= point[0] ** power constraint.SetCoefficient(coeff, x_val) # Error terms ex_plus", "= ((None, None), (None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points,", "solver.Constraint(point[1], point[1]) # Resultant Coefficient terms for coeff_num, coeff in", "for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in range(num_of_coeff): if", "optimal coefficients for a given polynomial. Overview: Objective: Sum of", "5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0]) y_vals =", "3.5, 4.0, 4.5] y_vals = [1.0, 0.9, 0.7, 1.5, 2.0,", "if coeff_ranges[coeff_num][0] is None: lower_bound = -solver.Infinity() else: lower_bound =", "= ((None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points(): print('STARTING", "+ '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return variables def _generate_objective_fn( solver, num_of_coeff,", "10 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5,", "in variables[:num_of_coeff]: var_to_val[coeff.name()] = coeff.solution_value() return var_to_val def demo_optimal_linear_5points(): \"\"\"Demonstration", "1) return objective def _generate_constraints(solver, points, num_of_coeff, variables): constraints =", "DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0, 0.5,", "each fit point to find the optimal coefficients for a", "{}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING LINEAR DEMO WITH 10 POINTS", "Add absolute error variables to variable list for point_cnt in", "coefficient names. 
TODO(drofp): Figure out naming scheme for arbitrary number", "constraints.append(constraint) return constraints def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None):", "10.0]) y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2,", "linear programming. Minimizes the sum of error for each fit", "x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5,", "(None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def main(): demo_optimal_quadratic_19points() if", "import pywraplp class ErrorDefinition(enum.Enum): SUM_ABS_DEV = enum.auto() SUM_MAX_DEVIATION = enum.auto()", "Equivalency constraint constraint = solver.Constraint(point[1], point[1]) # Resultant Coefficient terms", "-1) constraint.SetCoefficient(ex_minus, 1) constraints.append(constraint) return constraints def get_optimal_polynomial( points=None, coeff_ranges=None,", "points, num_of_coeff, variables) solver.Solve() var_to_val = dict() for coeff in", "get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): \"\"\"Optimize coefficients for any", "num_of_coeff - coeff_num - 1 x_val = point[0] ** power", "2.7, 5.7, 4.6, 6.0, 6.8, 7.3]) points = tuple(zip(x_vals, y_vals))", "if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else: upper_bound =", "print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_19points(): print('STARTING QUADRATIC DEMO WITH 19 POINTS", "One letter per english alphabet used for coefficient names. 
def _generate_objective_fn(
        solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV):
    """Build the LP objective: unit weight on every error variable.

    Every entry of `variables` past the first `num_of_coeff` is an error
    term; minimizing their (weighted) sum drives the fit.
    """
    objective = solver.Objective()
    error_terms = variables[num_of_coeff:]
    for err_var in error_terms:
        objective.SetCoefficient(err_var, 1)
    return objective
\"\"\" num_of_coeff = len(coeff_ranges)", "str(point_cnt + 1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return variables def", "coeff_ranges is None: raise ValueError('Please provide appropriate coefficient range.') if", "(None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_19points(): print('STARTING QUADRATIC", "order of polynomial, from highest order (0th index) to lowest", "2.0, 2.4, 3.2, 2.0, 2.7, 3.5] y_vals.extend([1.0, 4.0, 3.6, 2.7,", "range(len(points)): positive_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt +", "= enum.auto() def _generate_variables(solver, points, coeff_ranges, err_max, error_def): \"\"\"Create coefficient", "_generate_constraints(solver, points, num_of_coeff, variables): constraints = [] for point_num, point", "coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver,", "print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER') points", "order (nth index). err_def: An ErrorDefinition enum, specifying the definition", "coeff_ranges = ((None, None), (None, None)) # solver = pywraplp.Solver(", "variables[num_of_coeff:]: objective.SetCoefficient(variable, 1) return objective def _generate_constraints(solver, points, num_of_coeff, variables):", "3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None,", "= pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges)", "FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5,", "coeff in variables[:num_of_coeff]: var_to_val[coeff.name()] = coeff.solution_value() return var_to_val def demo_optimal_linear_5points():", "max). 
def get_optimal_polynomial(
        points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV,
        err_max=10000, solver=None):
    """Optimize coefficients for any order polynomial.

    Args:
        points: A tuple of points, represented as tuples (x, y).
        coeff_ranges: A tuple of valid coefficient ranges, represented as
            tuples (min, max). Number of elements in list determines order
            of polynomial, from highest order (0th index) to lowest order
            (nth index).
        error_def: An ErrorDefinition enum, specifying the definition for
            error.
        err_max: An Integer, specifying the maximum error allowable.
        solver: a ortools.pywraplp.Solver object, if a specific solver
            instance is requested by caller.

    Returns:
        A Dictionary, the desired coefficients mapped to their values.

    Raises:
        ValueError: If points or coeff_ranges is not provided.
    """
    # Fail fast on missing inputs instead of crashing inside the helpers.
    if points is None:
        raise ValueError('Please provide points to fit.')
    if coeff_ranges is None:
        raise ValueError('Please provide appropriate coefficient range.')
    if solver is None:
        solver = pywraplp.Solver(
            'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)
    variables = _generate_variables(
        solver, points, coeff_ranges, err_max=err_max, error_def=error_def)
    num_of_coeff = len(coeff_ranges)
    _generate_objective_fn(solver, num_of_coeff, variables)
    _generate_constraints(solver, points, num_of_coeff, variables)
    solver.Solve()
    # Report only the coefficient variables (the first num_of_coeff
    # entries); the error variables are internal to the formulation.
    var_to_val = dict()
    for coeff in variables[:num_of_coeff]:
        var_to_val[coeff.name()] = coeff.solution_value()
    return var_to_val
def demo_optimal_linear_5points():
    """Demonstration of getting optimal linear polynomial.

    Uses 5 points from Swanson's curve fitting paper.
    """
    print('STARTING LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER')
    sample_points = (0, 1), (1, 3), (2, 2), (3, 4), (4, 5)
    bounds = ((None, None), (None, None))
    result = get_optimal_polynomial(points=sample_points, coeff_ranges=bounds)
    for elm in result:
        print('elm: {}'.format(elm))
    print('type(optimized_coefficients): {}'.format(type(result)))
    print('optimized_coefficients: {}'.format(result))
def demo_optimal_linear_10points():
    """Fit a line to the 10-point Williams data set and print the result."""
    print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS')
    xs = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
    ys = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
    samples = tuple(zip(xs, ys))
    bounds = ((None, None), (None, None))
    print(get_optimal_polynomial(points=samples, coeff_ranges=bounds))
def demo_optimal_quadratic_10points():
    """Fit a quadratic to the 10-point Williams data set and print it."""
    print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS')
    xs = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
    ys = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
    samples = tuple(zip(xs, ys))
    # Three coefficient ranges -> second-order polynomial.
    bounds = ((None, None), (None, None), (None, None))
    print(get_optimal_polynomial(points=samples, coeff_ranges=bounds))
def demo_optimal_cubic_10points():
    """Fit a cubic to the 10-point Williams data set and print it."""
    print('STARTING CUBIC DEMO WITH 10 POINTS FROM WILLIAMS')
    xs = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5]
    ys = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5]
    samples = tuple(zip(xs, ys))
    # Four coefficient ranges -> third-order polynomial.
    bounds = ((None, None), (None, None), (None, None), (None, None))
    print(get_optimal_polynomial(points=samples, coeff_ranges=bounds))
def demo_optimal_quadratic_19points():
    """Fit a quadratic to the full 19-point Williams data set and print it."""
    print('STARTING QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS')
    xs = [0.0, 0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5,
          5.0, 5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0]
    ys = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5,
          1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3]
    samples = tuple(zip(xs, ys))
    bounds = ((None, None), (None, None), (None, None))
    print(get_optimal_polynomial(points=samples, coeff_ranges=bounds))
def main():
    # Entry point: run the 19-point quadratic demonstration.
    demo_optimal_quadratic_19points()


if __name__ == '__main__':
    main()
\"\"\" num_of_coeff = len(coeff_ranges) variables = [] coeff_names =", "+ 2 * point_num + 1] constraint.SetCoefficient(ex_plus, -1) constraint.SetCoefficient(ex_minus, 1)", "{}'.format(optimized_coefficients)) # m, b = optimized_coefficients # print('Optimized m: {},", "a ortools.pywraplp.Solver object, if a specific solver instance is requested", "2.4, 3.2, 2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges", "3.2, 2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges =", "for variable in variables[num_of_coeff:]: objective.SetCoefficient(variable, 1) return objective def _generate_constraints(solver,", "solver.Solve() var_to_val = dict() for coeff in variables[:num_of_coeff]: var_to_val[coeff.name()] =", "6.6, 7.0, 7.6, 8.5, 9.0, 10.0]) y_vals = [1.0, 0.9,", "function for given error definition.\"\"\" objective = solver.Objective() for variable", "error allowable. solver: a ortools.pywraplp.Solver object, if a specific solver", "to 26 variable polynomial. One letter per english alphabet used", "print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients # print('Optimized m:", "coeff_names[coeff_num])) # Add absolute error variables to variable list for", "def demo_optimal_linear_5points(): \"\"\"Demonstration of getting optimal linear polynomial. 
Uses 5", "FROM SWANSON PAPER') points = (0,1), (1,3), (2,2), (3,4), (4,5)", "of errors Subject to: Bounds on coefficients Credit: \"Curve Fitting", "= [] for point_num, point in enumerate(points): # Equivalency constraint", "7.0, 7.6, 8.5, 9.0, 10.0]) y_vals = [1.0, 0.9, 0.7,", "point_num] ex_minus = variables[num_of_coeff + 2 * point_num + 1]", "# Resultant Coefficient terms for coeff_num, coeff in enumerate(variables[:num_of_coeff]): power", "coeff_names.append('b') else: for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in", "Error terms ex_plus = variables[num_of_coeff + 2 * point_num] ex_minus", "0.7, 1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] points =", "ex_plus = variables[num_of_coeff + 2 * point_num] ex_minus = variables[num_of_coeff", "variables = [] coeff_names = [] # Add coefficients to", "with Linear Programming\", <NAME> and <NAME> \"\"\" from __future__ import", "tuple of valid coefficient ranges, respresented as tuples (min, max).", "order (0th index) to lowest order (nth index). err_def: An", "_generate_objective_fn(solver, num_of_coeff, variables) _generate_constraints(solver, points, num_of_coeff, variables) solver.Solve() var_to_val =", "pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables = _generate_variables( solver, points, coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff", "programming. Minimizes the sum of error for each fit point", "list for point_cnt in range(len(points)): positive_err_var = solver.NumVar( 0, err_max,", "y_vals)) coeff_ranges = ((None, None), (None, None), (None, None), (None,", "var_to_val def demo_optimal_linear_5points(): \"\"\"Demonstration of getting optimal linear polynomial. 
Uses", "points: A tuple of points, represented as tuples (x, y)", "6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0]) y_vals = [1.0,", "= num_of_coeff - coeff_num - 1 x_val = point[0] **", "1.5, 2.0, 2.4, 3.2, 2.0, 2.7, 3.5] y_vals.extend([1.0, 4.0, 3.6,", "pywraplp class ErrorDefinition(enum.Enum): SUM_ABS_DEV = enum.auto() SUM_MAX_DEVIATION = enum.auto() def", "= [] coeff_names = [] # Add coefficients to variable", "LINEAR DEMO WITH 5 POINTS FROM SWANSON PAPER') points =", "#!/usr/bin/env python3 \"\"\"Curve fitting with linear programming. Minimizes the sum", "(1,3), (2,2), (3,4), (4,5) coeff_ranges = ((None, None), (None, None))", "in range(len(points)): positive_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt", "2.0, 2.7, 3.5] points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None,", "to: Bounds on coefficients Credit: \"Curve Fitting with Linear Programming\",", "fitting paper. \"\"\" print('STARTING LINEAR DEMO WITH 5 POINTS FROM", "constraint = solver.Constraint(point[1], point[1]) # Resultant Coefficient terms for coeff_num,", "type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients # print('Optimized", "print('STARTING QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS') x_vals =", "points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): \"\"\"Optimize coefficients for any order", "fitting with linear programming. Minimizes the sum of error for", "'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges) for elm in", "9.0, 10.0]) y_vals = [1.0, 0.9, 0.7, 1.5, 2.0, 2.4,", "coefficients mapped to ther values. 
\"\"\" if coeff_ranges is None:", "lower_bound = -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is", "1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] x_vals.extend([5.0, 5.5, 6.0,", "coeff_ranges = ((None, None), (None, None), (None, None), (None, None))", "{}'.format( type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients #", "8.5, 9.0, 10.0]) y_vals = [1.0, 0.9, 0.7, 1.5, 2.0,", "None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_19points(): print('STARTING QUADRATIC DEMO WITH 19", "import print_function import enum import string from ortools.linear_solver import pywraplp", "print('Optimized m: {}, b: {}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING LINEAR", "(2,2), (3,4), (4,5) coeff_ranges = ((None, None), (None, None)) #", "enum.auto() SUM_MAX_DEVIATION = enum.auto() def _generate_variables(solver, points, coeff_ranges, err_max, error_def):", "coeff_ranges = ((None, None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges))", "points, represented as tuples (x, y) coeff_ranges: A tuple of", "range.') if solver is None: solver = pywraplp.Solver( 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)", "\"\"\"Demonstration of getting optimal linear polynomial. 
Uses 5 points from", "else: for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num in range(num_of_coeff):", "y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3]) points", "len(coeff_ranges) variables = [] coeff_names = [] # Add coefficients", "Minimizes the sum of error for each fit point to", "= -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None:", "lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity()", "index) to lowest order (nth index). err_def: An ErrorDefinition enum,", "_generate_variables(solver, points, coeff_ranges, err_max, error_def): \"\"\"Create coefficient variables. Initial version", "(None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points(): print('STARTING QUADRATIC DEMO WITH", "None: raise ValueError('Please provide appropriate coefficient range.') if solver is", "Resultant Coefficient terms for coeff_num, coeff in enumerate(variables[:num_of_coeff]): power =", "terms for coeff_num, coeff in enumerate(variables[:num_of_coeff]): power = num_of_coeff -", "constraint.SetCoefficient(coeff, x_val) # Error terms ex_plus = variables[num_of_coeff + 2", "None: lower_bound = -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1]", "{}, b: {}'.format(m, b)) def demo_optimal_linear_10points(): print('STARTING LINEAR DEMO WITH", "Fitting with Linear Programming\", <NAME> and <NAME> \"\"\" from __future__", "\"\"\"Optimize coefficients for any order polynomial. 
Args: points: A tuple", "__future__ import division from __future__ import print_function import enum import", "None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO", "get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges) for elm in optimized_coefficients: print('elm: {}'.format(elm)) print(", "coeff_ranges: A tuple of valid coefficient ranges, respresented as tuples", "english alphabet used for coefficient names. TODO(drofp): Figure out naming", "A tuple of points, represented as tuples (x, y) coeff_ranges:", "= solver.Constraint(point[1], point[1]) # Resultant Coefficient terms for coeff_num, coeff", "print('STARTING LINEAR DEMO WITH 10 POINTS FROM WILLIAMS') x_vals =", "list determines order of polynomial, from highest order (0th index)", "print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO WITH 10 POINTS", "enumerate(points): # Equivalency constraint constraint = solver.Constraint(point[1], point[1]) # Resultant", "elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format( type(optimized_coefficients))) print('optimized_coefficients:", "for point_num, point in enumerate(points): # Equivalency constraint constraint =", "2.7, 3.5] y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8,", "coefficient ranges, respresented as tuples (min, max). 
Nubmer of elements", "# m, b = optimized_coefficients # print('Optimized m: {}, b:", "err_max, 'e' + str(point_cnt + 1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var)", "3.0, 3.5, 4.0, 4.5] y_vals = [1.0, 0.9, 0.7, 1.5,", "Args: points: A tuple of points, represented as tuples (x,", "None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING", "version works for up to 26 variable polynomial. One letter", "solver.NumVar( 0, err_max, 'e' + str(point_cnt + 1) + '_plus')", "used for coefficient names. TODO(drofp): Figure out naming scheme for", "of points, represented as tuples (x, y) coeff_ranges: A tuple", "power = num_of_coeff - coeff_num - 1 x_val = point[0]", "for each fit point to find the optimal coefficients for", "QUADRATIC DEMO WITH 19 POINTS FROM WILLIAMS') x_vals = [0.0,", "coeff_ranges[coeff_num][0] if coeff_ranges[coeff_num][1] is None: upper_bound = solver.Infinity() else: upper_bound", "None), (None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_19points(): print('STARTING", "definition.\"\"\" objective = solver.Objective() for variable in variables[num_of_coeff:]: objective.SetCoefficient(variable, 1)", "from highest order (0th index) to lowest order (nth index).", "return constraints def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): \"\"\"Optimize", "coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO WITH 10 POINTS FROM", "3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3]) points = tuple(zip(x_vals,", "coefficients for a given polynomial. 
Overview: Objective: Sum of errors", "TODO(drofp): Figure out naming scheme for arbitrary number of variables.", "SUM_ABS_DEV = enum.auto() SUM_MAX_DEVIATION = enum.auto() def _generate_variables(solver, points, coeff_ranges,", "variable list for point_cnt in range(len(points)): positive_err_var = solver.NumVar( 0,", "6.8, 7.3]) points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None),", "CUBIC DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0,", "coefficients to variable list. if num_of_coeff == 2: coeff_names.append('m') coeff_names.append('b')", "3.5] y_vals.extend([1.0, 4.0, 3.6, 2.7, 5.7, 4.6, 6.0, 6.8, 7.3])", "elements in list determines order of polynomial, from highest order", "'type(optimized_coefficients): {}'.format( type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m, b = optimized_coefficients", "x_val = point[0] ** power constraint.SetCoefficient(coeff, x_val) # Error terms", "solver.Infinity() else: upper_bound = coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num])) #", "= tuple(zip(x_vals, y_vals)) coeff_ranges = ((None, None), (None, None), (None,", "to variable list. 
if num_of_coeff == 2: coeff_names.append('m') coeff_names.append('b') else:", "in range(num_of_coeff): if coeff_ranges[coeff_num][0] is None: lower_bound = -solver.Infinity() else:", "in enumerate(points): # Equivalency constraint constraint = solver.Constraint(point[1], point[1]) #", "constraint constraint = solver.Constraint(point[1], point[1]) # Resultant Coefficient terms for", "print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format( type(optimized_coefficients))) print('optimized_coefficients: {}'.format(optimized_coefficients)) # m,", "coeff_names.append('m') coeff_names.append('b') else: for letter_cnt in range(num_of_coeff): coeff_names.append(string.ascii_lowercase[letter_cnt]) for coeff_num", "1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return variables def _generate_objective_fn( solver,", "absolute_import from __future__ import division from __future__ import print_function import", "objective function for given error definition.\"\"\" objective = solver.Objective() for", "(None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def main(): demo_optimal_quadratic_19points() if __name__ ==", "solver, points, coeff_ranges, err_max=err_max, error_def=error_def) num_of_coeff = len(coeff_ranges) _generate_objective_fn(solver, num_of_coeff,", "((None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_quadratic_10points(): print('STARTING QUADRATIC", "None), (None, None)) # solver = pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING)", "None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO WITH 10", "specific solver instance is requested by caller. 
Returns: A Dictionary,", "tuple of points, represented as tuples (x, y) coeff_ranges: A", "QUADRATIC DEMO WITH 10 POINTS FROM WILLIAMS') x_vals = [0.0,", "point[1]) # Resultant Coefficient terms for coeff_num, coeff in enumerate(variables[:num_of_coeff]):", "else: upper_bound = coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound, coeff_names[coeff_num])) # Add", "from Swanson's curve fitting paper. \"\"\" print('STARTING LINEAR DEMO WITH", "POINTS FROM WILLIAMS') x_vals = [0.0, 0.5, 1.0, 1.5, 1.9,", "26 variable polynomial. One letter per english alphabet used for", "polynomial, from highest order (0th index) to lowest order (nth", "An Integer, specifying the maximum error allowable. solver: a ortools.pywraplp.Solver", "6.0, 6.8, 7.3]) points = tuple(zip(x_vals, y_vals)) coeff_ranges = ((None,", "points=points, coeff_ranges=coeff_ranges) for elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients):", "4.5] x_vals.extend([5.0, 5.5, 6.0, 6.6, 7.0, 7.6, 8.5, 9.0, 10.0])", "POINTS FROM SWANSON PAPER') points = (0,1), (1,3), (2,2), (3,4),", "2.5, 3.0, 3.5, 4.0, 4.5] x_vals.extend([5.0, 5.5, 6.0, 6.6, 7.0,", "None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def main(): demo_optimal_quadratic_19points() if __name__ == '__main__':", "2 * point_num] ex_minus = variables[num_of_coeff + 2 * point_num", "specifying the maximum error allowable. 
solver: a ortools.pywraplp.Solver object, if", "'_plus') negative_err_var = solver.NumVar( 0, err_max, 'e' + str(point_cnt +", "variables[num_of_coeff + 2 * point_num] ex_minus = variables[num_of_coeff + 2", "SWANSON PAPER') points = (0,1), (1,3), (2,2), (3,4), (4,5) coeff_ranges", "solver, num_of_coeff, variables, error_def=ErrorDefinition.SUM_ABS_DEV): \"\"\"Generate objective function for given error", "0.5, 1.0, 1.5, 1.9, 2.5, 3.0, 3.5, 4.0, 4.5] y_vals", "def demo_optimal_cubic_10points(): print('STARTING CUBIC DEMO WITH 10 POINTS FROM WILLIAMS')", "for elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format( type(optimized_coefficients)))", "y_vals)) coeff_ranges = ((None, None), (None, None)) print(get_optimal_polynomial(points=points, coeff_ranges=coeff_ranges)) def", "'e' + str(point_cnt + 1) + '_minus') variables.append(positive_err_var) variables.append(negative_err_var) return", "upper_bound = solver.Infinity() else: upper_bound = coeff_ranges[coeff_num][1] variables.append( solver.NumVar(lower_bound, upper_bound,", "mapped to ther values. 
\"\"\" if coeff_ranges is None: raise", "def get_optimal_polynomial( points=None, coeff_ranges=None, error_def=ErrorDefinition.SUM_ABS_DEV, err_max=10000, solver=None): \"\"\"Optimize coefficients for", "coeff_ranges=coeff_ranges) for elm in optimized_coefficients: print('elm: {}'.format(elm)) print( 'type(optimized_coefficients): {}'.format(", "pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial( points=points, coeff_ranges=coeff_ranges) for", "is None: lower_bound = -solver.Infinity() else: lower_bound = coeff_ranges[coeff_num][0] if", "# Add absolute error variables to variable list for point_cnt", "if solver is None: solver = pywraplp.Solver( 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) variables", "# solver = pywraplp.Solver( # 'polynomial_solver', pywraplp.Solver.GLOP_LINEAR_PROGRAMMING) optimized_coefficients = get_optimal_polynomial(" ]
[ "try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file,", "# Make the rpath to sourcekitd relative in the toolchain", "of %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd))", "bin_dir = os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx')", "ought to be able to query SwiftPM for this info.", "Xcode workspace to create a unified build of SwiftSyntax with", "# Convert package_dir to absolute path, relative to root of", "ArgumentParser.error(\"'--prefix' is required with the install action\") parsed.swift_exec = os.path.join(parsed.toolchain,", "as e: printerr('FAIL: Building %s failed' % package_name) printerr('Executing: %s'", "sourcekitd relative in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add +=", "print(\"** Building %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build',", "args.extend(['--product', product]) # Tell SwiftSyntax that we are building in", "able to query SwiftPM for this info. 
if package_dir.endswith(\"/SourceKitStressTester\"): return", "modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the rpath to", "if should_run_action(\"install\", args.build_actions): print(\"** Installing %s **\" % package_name) stdlib_dir", "lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory in [bin_dir,", "**\" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except", "unified build of SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True, help='the", "parsed.package_dir)) # Convert build_dir to absolute path, relative to package_dir.", "verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config,", "+= ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest,", "product in products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file,", "'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed = parser.parse_args(args) if (\"install\" in parsed.build_actions", "action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose)", "def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir) config_path", "the Xcode project failed') printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1)", "= list(rpaths_to_delete) # Add the rpath to the stdlib in", "not parsed.prefix: ArgumentParser.error(\"'--prefix' is required with the install action\") parsed.swift_exec", "'.join(e.cmd)) sys.exit(1) 
if should_run_action(\"test\", args.build_actions): print(\"** Testing %s **\" %", "just testing if should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building %s **\"", "failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) output_dir", "check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir,", "% (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest,", "in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest,", "= os.path.join(build_dir, product) dest = os.path.join(bin_dir, product) # Create a", "try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e:", "in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src,", "args.build_actions): print(\"** Generating Xcode project for %s **\" % package_name)", "[sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def", "printerr('FAIL: Installing %s failed' % package_name) printerr('Executing: %s' % '", "for the list of Swift project authors ------------------------------------------------------------------------------ This is", "rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest] print('installing %s to %s'", "in arg or ' ' in arg: return '\"%s\"' %", "remove_rpath(dest, rpath, verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose)", "# The test action creates its own build. 
No need", "swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except", "args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test': args.extend(['--test-product', product]) else: args.extend(['--product',", "'--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose)", "return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env =", "in get_products(package_dir): src = os.path.join(build_dir, product) dest = os.path.join(bin_dir, product)", "import sys import os, platform import subprocess def printerr(message): print(message,", "in in the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in", "'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env,", "verbose, env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for arg in", "Swift project authors Licensed under Apache License v2.0 with Runtime", "sys.exit(1) if should_run_action(\"test\", args.build_actions): print(\"** Testing %s **\" % package_name)", "parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env", "except subprocess.CalledProcessError as e: printerr('FAIL: Generating the Xcode project failed')", "be able to query SwiftPM for this info. 
if package_dir.endswith(\"/SourceKitStressTester\"):", "print(\"** Testing %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test',", "xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name) args = [swift_exec, 'package',", "a swift workspace. \"\"\" from __future__ import print_function import argparse", "%s' % ' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if", "Swift.org open source project Copyright (c) 2014 - 2018 Apple", "SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path", "in zip([value] * len(list), list) for item in pair] def", "else: return arg def get_products(package_dir): # FIXME: We ought to", "def should_run_action(action_name, selected_actions): if action_name in selected_actions: return True elif", "build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test': args.extend(['--test-product',", "check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose):", "verbose): cmd = ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def", "'.join([escape_cmd_arg(arg) for arg in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs)", "Inc. and the Swift project authors Licensed under Apache License", "dependencies (i.e. checked out next sourcekit-lsp). 
if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS']", "package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # The test", "%s' % ' '.join(e.cmd)) sys.exit(1) # The test action creates", "selected_actions: return True else: return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir,", "config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath, verbose):", "<filename>build-script-helper.py #!/usr/bin/env python \"\"\" This source file is part of", "absolute path, relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return", "any of the actions in `action_names` should be run. def", "subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies of %s failed' %", "print_function import argparse import sys import os, platform import subprocess", "as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath}", "except subprocess.CalledProcessError as e: printerr('FAIL: Installing %s failed' % package_name)", "to use when building this package') parser.add_argument('--update', action='store_true', help='update all", "== 'test': args.extend(['--test-product', product]) else: args.extend(['--product', product]) # Tell SwiftSyntax", "sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): args = [swift_exec, action,", "args.extend(['--test-product', product]) else: args.extend(['--product', product]) # Tell SwiftSyntax that we", "project for %s **\" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath,", "add_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd,", "build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir,", "an Xcode 
workspace to create a unified build of SwiftSyntax", "create a unified build of SwiftSyntax with other projects.') parser.add_argument('--toolchain',", "out next sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if", "['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose):", "sys.argv[1:]) run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v',", "open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS", "\"all\" in parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix' is required with", "directory in [bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory) # Install", "other. for product in products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath,", "build-script.py that knows how to build and install the stress", "'--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath, verbose): cmd", "= parser.parse_args(args) if (\"install\" in parsed.build_actions or \"all\" in parsed.build_actions)", "os.path.basename(args.package_dir) env = dict(os.environ) # Use local dependencies (i.e. checked", "action creates its own build. 
No need to build if", "True elif \"all\" in selected_actions: return True else: return False", "sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action,", "' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\", args.build_actions): print(\"** Testing %s **\"", "with the install action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift')", "sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating", "it has already been built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH']", "add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name", "the Swift project authors Licensed under Apache License v2.0 with", "print(\"** Installing %s **\" % package_name) stdlib_dir = os.path.join(args.toolchain, 'usr',", "os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as config_file:", "does not need to rebuilt if it has already been", "built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env,", "next sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update:", "printerr('FAIL: Building %s failed' % package_name) printerr('Executing: %s' % '", "verbose=verbose) def add_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-add_rpath', rpath,", "sys.exit(1) # The test action creates its own build. No", "sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete,", "path, relative to root of repo. 
repo_path = os.path.dirname(__file__) parsed.package_dir", "package_dir, '-c', config, '--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if", "'-c', config, '--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action", "this package') parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true',", "to rebuilt if it has already been built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT']", "pair] def escape_cmd_arg(arg): if '\"' in arg or ' '", "package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec,", "verbose): args = [swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path',", "config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir,", "False def should_run_action(action_name, selected_actions): if action_name in selected_actions: return True", "dest] print('installing %s to %s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose)", "return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args =", "list) for item in pair] def escape_cmd_arg(arg): if '\"' in", "% package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError", "is implemented, we cannot request a build of multiple #", "actions in `action_names` should be run. def should_run_any_action(action_names, selected_actions): for", "This source file is part of the Swift.org open source", "of the actions in `action_names` should be run. 
def should_run_any_action(action_names,", "env, verbose): args = [swift_exec, action, '--package-path', package_dir, '-c', config,", "building in a build-script environment so that # it does", "= os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx') for", "or ' ' in arg: return '\"%s\"' % arg.replace('\"', '\\\\\"')", "if action_name in selected_actions: return True elif \"all\" in selected_actions:", "2014 - 2018 Apple Inc. and the Swift project authors", "Convert package_dir to absolute path, relative to root of repo.", "be run. def should_run_any_action(action_names, selected_actions): for action_name in action_names: if", "os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS =", "'\"' in arg or ' ' in arg: return '\"%s\"'", "if it has already been built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1'", "# Use local dependencies (i.e. checked out next sourcekit-lsp). if", "SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to use", "package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir,", "run. def should_run_any_action(action_names, selected_actions): for action_name in action_names: if should_run_action(action_name,", "* len(list), list) for item in pair] def escape_cmd_arg(arg): if", "toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product,", "package_dir. 
parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir", "if we are just testing if should_run_any_action(['build', 'install'], args.build_actions): print(\"**", "**kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for arg in cmd])) return", "as e: printerr('FAIL: Updating dependencies of %s failed' % package_name)", "[swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose)", "print(\"** Generating Xcode project for %s **\" % package_name) try:", "'lib', 'swift', 'macosx') for directory in [bin_dir, lib_dir]: if not", "arg def get_products(package_dir): # FIXME: We ought to be able", "argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix',", "`action_names` should be run. def should_run_any_action(action_names, selected_actions): for action_name in", "cannot request a build of multiple # targets simultaneously. For", "relative to root of repo. 
repo_path = os.path.dirname(__file__) parsed.package_dir =", "False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args = [swift_exec,", "args = [swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path', build_dir]", "rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib',", "swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Updating", "package_name) args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path,", "since we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the", "Until rdar://53881101 is implemented, we cannot request a build of", "multiroot_data_file, config, env, verbose): # Until rdar://53881101 is implemented, we", "if product in ['sk-stress-test', 'swift-evolve']: # Make the rpath to", "install the stress tester utilities given a swift workspace. \"\"\"", "return ['swift-evolve'] else: return [] if __name__ == '__main__': main()", "local dependencies (i.e. checked out next sourcekit-lsp). if not args.no_local_deps:", "action='store_true', help='use normal remote dependencies when building') parser.add_argument('build_actions', help=\"Extra actions", "parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install", "script for the main swift repository's build-script.py that knows how", "parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr',", "rebuilt if it has already been built before. 
env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] =", "os.path.join(bin_dir, product) # Create a copy of the list since", "= \"1\" if args.update: print(\"** Updating dependencies of %s **\"", "= os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to absolute path,", "config, env, verbose): args = [swift_exec, action, '--package-path', package_dir, '-c',", "build of multiple # targets simultaneously. For now, just build", "# it does not need to rebuilt if it has", "in action_names: if should_run_action(action_name, selected_actions): return True return False def", "is part of the Swift.org open source project Copyright (c)", "'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib') # Convert", "required=True, help='the toolchain to use when building this package') parser.add_argument('--update',", "action_name in action_names: if should_run_action(action_name, selected_actions): return True return False", "env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return [item for pair", "parser.parse_args(args) if (\"install\" in parsed.build_actions or \"all\" in parsed.build_actions) and", "project failed') printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\",", "not need to rebuilt if it has already been built", "the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest,", "invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose)", "%s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete:", "def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true',", "the stdlib in in the toolchain 
rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if", "xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath, verbose): cmd =", "verbose): copy_cmd=['rsync', '-a', src, dest] print('installing %s to %s' %", "for pair in zip([value] * len(list), list) for item in", "output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode", "https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors ------------------------------------------------------------------------------ This", "if should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building %s **\" % package_name)", "'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir", "rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a',", "platform import subprocess def printerr(message): print(message, file=sys.stderr) def main(argv_prefix =", "help='log executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build')", "'%s.xcodeproj' % package_name) args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj',", "= ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose,", "tester utilities given a swift workspace. \"\"\" from __future__ import", "product]) else: args.extend(['--product', product]) # Tell SwiftSyntax that we are", "to absolute path, relative to root of repo. 
repo_path =", "['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete,", "action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote", "= os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir to absolute path,", "= {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name)", "try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e:", "be any number of the following\", choices=['all', 'build', 'test', 'install',", "nargs=\"*\", default=['build']) parsed = parser.parse_args(args) if (\"install\" in parsed.build_actions or", "test action creates its own build. 
No need to build", "True return False def should_run_action(action_name, selected_actions): if action_name in selected_actions:", "as e: printerr('FAIL: Testing %s failed' % package_name) printerr('Executing: %s'", "verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg)", "building this package') parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps',", "return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return []", "parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal", "if not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper for", "rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose):", "to build and install the stress tester utilities given a", "Copyright (c) 2014 - 2018 Apple Inc. 
and the Swift", "subprocess.CalledProcessError as e: printerr('FAIL: Installing %s failed' % package_name) printerr('Executing:", "'\\\\\"') else: return arg def get_products(package_dir): # FIXME: We ought", "products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as", "print(\"** Updating dependencies of %s **\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir,", "if args.update: print(\"** Updating dependencies of %s **\" % package_name)", "if action == 'test': args.extend(['--test-product', product]) else: args.extend(['--product', product]) #", "elif \"all\" in selected_actions: return True else: return False def", "dict(os.environ) # Use local dependencies (i.e. checked out next sourcekit-lsp).", "package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as", "build_dir to absolute path, relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir,", "path, relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed", "def add_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-add_rpath', rpath, binary]", "list): return [item for pair in zip([value] * len(list), list)", "printerr('FAIL: Updating dependencies of %s failed' % package_name) printerr('Executing: %s'", "subprocess def printerr(message): print(message, file=sys.stderr) def main(argv_prefix = []): args", "own build. No need to build if we are just", "actions to perform. 
Can be any number of the following\",", "check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-delete_rpath',", "'--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir,", "to absolute path, relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir)", "% package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\",", "'--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug')", "verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Building %s failed' %", "check_call(cmd, verbose, env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for arg", "sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e:", "this info. 
if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return", "\"\"\" This source file is part of the Swift.org open", "Can be any number of the following\", choices=['all', 'build', 'test',", "invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose)", "= argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands')", "e: printerr('FAIL: Generating the Xcode project failed') printerr('Executing: %s' %", "to sourcekitd relative in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add", "in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env,", "verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir =", "parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir to absolute", "' '.join(e.cmd)) sys.exit(1) # Returns true if any of the", "'macosx') for directory in [bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory)", "knows how to build and install the stress tester utilities", "+= [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose)", "% ' '.join(e.cmd)) sys.exit(1) # The test action creates its", "action, '--package-path', package_dir, '-c', config, '--build-path', build_dir] if multiroot_data_file: 
args.extend(['--multiroot-data-file',", "FIXME: We ought to be able to query SwiftPM for", "for license information See https://swift.org/CONTRIBUTORS.txt for the list of Swift", "dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building') parser.add_argument('build_actions',", "in selected_actions: return True else: return False def update_swiftpm_dependencies(package_dir, swift_exec,", "binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool',", "request a build of multiple # targets simultaneously. For now,", "$(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj'", "if '\"' in arg or ' ' in arg: return", "arg.replace('\"', '\\\\\"') else: return arg def get_products(package_dir): # FIXME: We", "with Runtime Library Exception See https://swift.org/LICENSE.txt for license information See", "of Swift project authors ------------------------------------------------------------------------------ This is a helper script", "help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies", "= [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env,", "that we are building in a build-script environment so that", "install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest] print('installing", "executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file',", "cmd = ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd,", "dest, rpaths_to_delete, 
rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest] print('installing %s", "parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to", "import os, platform import subprocess def printerr(message): print(message, file=sys.stderr) def", "Installing %s **\" % package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib',", "os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib') #", "printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # Returns true if", "This is a helper script for the main swift repository's", "return '\"%s\"' % arg.replace('\"', '\\\\\"') else: return arg def get_products(package_dir):", "from __future__ import print_function import argparse import sys import os,", "multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test': args.extend(['--test-product', product]) else:", "args = parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args): parser =", "parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir',", "'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError", "sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): # Until rdar://53881101 is", "authors ------------------------------------------------------------------------------ This is a helper script for the main", "for rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec,", "'swift') parsed.sourcekitd_dir = 
os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir to", "'test': args.extend(['--test-product', product]) else: args.extend(['--product', product]) # Tell SwiftSyntax that", "args.build_actions): print(\"** Building %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec,", "Xcode project for %s **\" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec,", "return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return [item", "source project Copyright (c) 2014 - 2018 Apple Inc. and", "os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to absolute path, relative to", "should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building %s **\" % package_name) try:", "after the other. for product in products: invoke_swift_single_product(package_dir, swift_exec, action,", "'--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test':", "env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Building %s failed'", "% ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\", args.build_actions): print(\"** Testing %s", "sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Installing", "swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Generating", "sys.exit(1) # Returns true if any of the actions in", "simultaneously. 
For now, just build one product after the other.", "failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) #", "'--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec, action,", "print(' '.join([escape_cmd_arg(arg) for arg in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT,", "if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode project for %s **\"", "product in ['sk-stress-test', 'swift-evolve']: # Make the rpath to sourcekitd", "return True elif \"all\" in selected_actions: return True else: return", "if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action == 'test': args.extend(['--test-product', product])", "repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert", "to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args):", "if any of the actions in `action_names` should be run.", "parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix' is required with the install", "return True else: return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env,", "copy of the list since we modify it rpaths_to_delete_for_this_product =", "for this info. 
if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"):", "the list of Swift project authors ------------------------------------------------------------------------------ This is a", "config, '--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file]) if action ==", "should_run_action(\"install\", args.build_actions): print(\"** Installing %s **\" % package_name) stdlib_dir =", "def should_run_any_action(action_names, selected_actions): for action_name in action_names: if should_run_action(action_name, selected_actions):", "%s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' %", "open source project Copyright (c) 2014 - 2018 Apple Inc.", "parsed = parser.parse_args(args) if (\"install\" in parsed.build_actions or \"all\" in", "we cannot request a build of multiple # targets simultaneously.", "verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir,", "'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS =", "[swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args,", "sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir =", "we are building in a build-script environment so that #", "verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir)", "config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited)", "env=env, verbose=verbose) def 
invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file,", "'.join(e.cmd)) sys.exit(1) # Returns true if any of the actions", "build if we are just testing if should_run_any_action(['build', 'install'], args.build_actions):", "verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-delete_rpath', rpath,", "src = os.path.join(build_dir, product) dest = os.path.join(bin_dir, product) # Create", "products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): # Until rdar://53881101", "dest = os.path.join(bin_dir, product) # Create a copy of the", "[bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and", "environment so that # it does not need to rebuilt", "package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def", "a unified build of SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True,", "root of repo. repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path,", "build-script environment so that # it does not need to", "Returns true if any of the actions in `action_names` should", "config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Building %s", "products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env,", "absolute path, relative to root of repo. repo_path = os.path.dirname(__file__)", "in products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config,", "SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building')", "has already been built before. 
env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] =", "rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs): if", "action_name in selected_actions: return True elif \"all\" in selected_actions: return", "['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']: # Make the rpath", "% package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError", "v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt for license information", "'.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"**", "the list since we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) #", "install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as e:", "before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose)", "env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file,", "env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies of", "%s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"** Installing", "% ' '.join(e.cmd)) sys.exit(1) # Returns true if any of", "For now, just build one product after the other. 
for", "arg or ' ' in arg: return '\"%s\"' % arg.replace('\"',", "parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed", "selected_actions): return True return False def should_run_action(action_name, selected_actions): if action_name", "We ought to be able to query SwiftPM for this", "the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']:", "parsed.build_actions or \"all\" in parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix' is", "list since we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add", "selected_actions: return True elif \"all\" in selected_actions: return True else:", "under Apache License v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt", "should_run_action(action_name, selected_actions): return True return False def should_run_action(action_name, selected_actions): if", "parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name", "try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env,", "already been built before. 
env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath", "# Install sk-stress-test and sk-swiftc-wrapper for product in get_products(package_dir): src", "source file is part of the Swift.org open source project", "'-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd", "%s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1)", "list(rpaths_to_delete) # Add the rpath to the stdlib in in", "swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env,", "'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except", "'package', '--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args, env=env, verbose=verbose) def", "a build-script environment so that # it does not need", "arg in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value,", "dependencies when building') parser.add_argument('build_actions', help=\"Extra actions to perform. 
Can be", "stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix,", "rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']: # Make", "sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env = dict(os.environ) # Use local", "= os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS", "workspace to create a unified build of SwiftSyntax with other", "os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode project for", "copy_cmd=['rsync', '-a', src, dest] print('installing %s to %s' % (os.path.basename(src),", "building') parser.add_argument('build_actions', help=\"Extra actions to perform. Can be any number", "'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory in", "parsed.prefix: ArgumentParser.error(\"'--prefix' is required with the install action\") parsed.swift_exec =", "in selected_actions: return True elif \"all\" in selected_actions: return True", "product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): args = [swift_exec,", "os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir,", "of the list since we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete)", "'install'], args.build_actions): print(\"** Building %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir,", "config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath))", 
"multiroot_data_file]) if action == 'test': args.extend(['--test-product', product]) else: args.extend(['--product', product])", "args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output',", "- 2018 Apple Inc. and the Swift project authors Licensed", "invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose):", "env = dict(os.environ) # Use local dependencies (i.e. checked out", "dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose):", "help='Path to an Xcode workspace to create a unified build", "build_dir, multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product,", "sk-swiftc-wrapper for product in get_products(package_dir): src = os.path.join(build_dir, product) dest", "parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log", "selected_actions): for action_name in action_names: if should_run_action(action_name, selected_actions): return True", "else: return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args", "just build one product after the other. 
for product in", "pair in zip([value] * len(list), list) for item in pair]", "for product in products: invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir,", "len(list), list) for item in pair] def escape_cmd_arg(arg): if '\"'", "'\"%s\"' % arg.replace('\"', '\\\\\"') else: return arg def get_products(package_dir): #", "multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Building", "rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add,", "remote dependencies when building') parser.add_argument('build_actions', help=\"Extra actions to perform. Can", "or \"all\" in parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix' is required", "printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config))", "in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list):", "__future__ import print_function import argparse import sys import os, platform", "%s to %s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath", "products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except", "a copy of the list since we modify it rpaths_to_delete_for_this_product", "# Convert build_dir to absolute path, relative to package_dir. 
parsed.build_dir", "if should_run_action(\"test\", args.build_actions): print(\"** Testing %s **\" % package_name) try:", "verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Installing %s failed' %", "get_products(package_dir): # FIXME: We ought to be able to query", "package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return [] if __name__ == '__main__':", "os.path.join(package_dir, '%s.xcodeproj' % package_name) args = [swift_exec, 'package', '--package-path', package_dir,", "zip([value] * len(list), list) for item in pair] def escape_cmd_arg(arg):", "% package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError", "help='use normal remote dependencies when building') parser.add_argument('build_actions', help=\"Extra actions to", "'Config.xcconfig') with open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath}", "stdlib in in the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product", "= os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as", "repo. 
repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) #", "= os.path.basename(args.package_dir) env = dict(os.environ) # Use local dependencies (i.e.", "to an Xcode workspace to create a unified build of", "if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else:", "'--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath,", "[swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path', build_dir] if multiroot_data_file:", "def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest]", "cmd = ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary,", "remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd,", "parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to absolute", "return False def should_run_action(action_name, selected_actions): if action_name in selected_actions: return", "to be able to query SwiftPM for this info. if", "SwiftPM for this info. if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif", "args = [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update'] check_call(args,", "item in pair] def escape_cmd_arg(arg): if '\"' in arg or", "'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose)", "env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Generating the Xcode", "of repo. 
repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir))", "checked out next sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\"", "env, verbose): # Until rdar://53881101 is implemented, we cannot request", "env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update: print(\"** Updating dependencies of %s", "targets simultaneously. For now, just build one product after the", "'-a', src, dest] print('installing %s to %s' % (os.path.basename(src), dest))", "def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir,", "config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Testing %s", "sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"** Installing %s **\" % package_name)", "multiple # targets simultaneously. For now, just build one product", "the rpath to the stdlib in in the toolchain rpaths_to_add", "the actions in `action_names` should be run. 
def should_run_any_action(action_names, selected_actions):", "update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args = [swift_exec, 'package', '--package-path',", "package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as", "required with the install action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin',", "that # it does not need to rebuilt if it", "action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): # Until", "\"1\" if args.update: print(\"** Updating dependencies of %s **\" %", "Library Exception See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for", "config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w') as config_file: config_file.write('''", "% ' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\",", "helper script for the main swift repository's build-script.py that knows", "printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # The test action", "args.build_actions): print(\"** Installing %s **\" % package_name) stdlib_dir = os.path.join(args.toolchain,", "and not parsed.prefix: ArgumentParser.error(\"'--prefix' is required with the install action\")", "package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # Returns true", "printerr('FAIL: Testing %s failed' % package_name) printerr('Executing: %s' % '", "'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath,", "run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester') parser.add_argument('-v', 
'--verbose',", "build_dir, multiroot_data_file, config, env, verbose): # Until rdar://53881101 is implemented,", "verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath", "% arg.replace('\"', '\\\\\"') else: return arg def get_products(package_dir): # FIXME:", "def remove_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-delete_rpath', rpath, binary]", "package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions):", "Swift project authors ------------------------------------------------------------------------------ This is a helper script for", "authors Licensed under Apache License v2.0 with Runtime Library Exception", "def interleave(value, list): return [item for pair in zip([value] *", "[]): args = parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args): parser", "rpaths_to_add += ['@executable_path/../lib'] install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src,", "= parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY')", "to build if we are just testing if should_run_any_action(['build', 'install'],", "all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when", "env=env, verbose=verbose) def add_rpath(binary, rpath, verbose): cmd = ['install_name_tool', '-add_rpath',", "= '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir,", "verbose: print(' '.join([escape_cmd_arg(arg) for arg in cmd])) return subprocess.check_call(cmd, env=env,", "of SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to", "Create a copy of the list since we modify it", "'swift', 
'macosx') for directory in [bin_dir, lib_dir]: if not os.path.exists(directory):", "% package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath,", "one product after the other. for product in products: invoke_swift_single_product(package_dir,", "**\" % package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx')", "need to rebuilt if it has already been built before.", "#!/usr/bin/env python \"\"\" This source file is part of the", "been built before. env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args,", "default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create a", "relative to package_dir. parsed.build_dir = os.path.join(parsed.package_dir, parsed.build_dir) return parsed def", "% package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file,", "# Add the rpath to the stdlib in in the", "Add the rpath to the stdlib in in the toolchain", "relative in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath] rpaths_to_add += ['@executable_path/../lib']", "Install sk-stress-test and sk-swiftc-wrapper for product in get_products(package_dir): src =", "escape_cmd_arg(arg): if '\"' in arg or ' ' in arg:", "' ' in arg: return '\"%s\"' % arg.replace('\"', '\\\\\"') else:", "package') parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies') parser.add_argument('--no-local-deps', action='store_true', help='use", "install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], 
verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL:", "the following\", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed", "rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath,", "'-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs):", "verbose): # Until rdar://53881101 is implemented, we cannot request a", "printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\", args.build_actions): print(\"**", "verbose): package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path,", "stress tester utilities given a swift workspace. \"\"\" from __future__", "'.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"** Installing %s **\" %", "product in get_products(package_dir): src = os.path.join(build_dir, product) dest = os.path.join(bin_dir,", "= os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir", "%s' % ' '.join(e.cmd)) sys.exit(1) # Returns true if any", "the rpath to sourcekitd relative in the toolchain rpaths_to_delete_for_this_product +=", "failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if", "action_names: if should_run_action(action_name, selected_actions): return True return False def should_run_action(action_name,", "elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return [] if __name__ ==", "creates its own build. 
No need to build if we", "' in arg: return '\"%s\"' % arg.replace('\"', '\\\\\"') else: return", "toolchain to use when building this package') parser.add_argument('--update', action='store_true', help='update", "action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain,", "choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed = parser.parse_args(args)", "dependencies of %s **\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir,", "**\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir,", "it does not need to rebuilt if it has already", "# targets simultaneously. For now, just build one product after", "verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift',", "rdar://53881101 is implemented, we cannot request a build of multiple", "= os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory in [bin_dir, lib_dir]:", "os.path.join(install_dir, 'bin') lib_dir = os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory", "rpath to sourcekitd relative in the toolchain rpaths_to_delete_for_this_product += [sourcekit_searchpath]", "os.path.dirname(__file__) parsed.package_dir = os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to", "when building this package') parser.add_argument('--update', action='store_true', help='update all SwiftPM dependencies')", "implemented, we cannot request a build of multiple # targets", "build of SwiftSyntax with other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain", "import print_function import argparse import sys import os, platform 
import", "['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return [] if", "Generating Xcode project for %s **\" % package_name) try: generate_xcodeproj(args.package_dir,", "'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path] check_call(args, env=env, verbose=verbose) def add_rpath(binary,", "we are just testing if should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building", "Building %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir),", "a helper script for the main swift repository's build-script.py that", "' '.join(e.cmd)) sys.exit(1) # The test action creates its own", "Make the rpath to sourcekitd relative in the toolchain rpaths_to_delete_for_this_product", "['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ,", "= os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name =", "action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError", "so that # it does not need to rebuilt if", "binary] check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs): if verbose:", "env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for arg in cmd]))", "except subprocess.CalledProcessError as e: printerr('FAIL: Building %s failed' % package_name)", "in the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test',", "'.join(e.cmd)) sys.exit(1) # The test action creates its own build.", "= 
os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode project", "LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' %", "normal remote dependencies when building') parser.add_argument('build_actions', help=\"Extra actions to perform.", "install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin')", "build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies", "'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return [] if __name__", "should_run_action(\"test\", args.build_actions): print(\"** Testing %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir,", "update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL:", "%s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\", args.build_actions): print(\"** Testing", "if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update: print(\"** Updating", "'swift-evolve']: # Make the rpath to sourcekitd relative in the", "in ['sk-stress-test', 'swift-evolve']: # Make the rpath to sourcekitd relative", "arg: return '\"%s\"' % arg.replace('\"', '\\\\\"') else: return arg def", "help=\"Extra actions to perform. 
Can be any number of the", "parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config',", "product]) # Tell SwiftSyntax that we are building in a", "printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"**", "action == 'test': args.extend(['--test-product', product]) else: args.extend(['--product', product]) # Tell", "path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode", "Convert build_dir to absolute path, relative to package_dir. parsed.build_dir =", "rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest, rpath,", "Apple Inc. and the Swift project authors Licensed under Apache", "dependencies of %s failed' % package_name) printerr('Executing: %s' % '", "except subprocess.CalledProcessError as e: printerr('FAIL: Testing %s failed' % package_name)", "parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env = dict(os.environ)", "use when building this package') parser.add_argument('--update', action='store_true', help='update all SwiftPM", "build and install the stress tester utilities given a swift", "swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): args", "it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the rpath to the", "to %s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath in", "of the Swift.org open source project Copyright (c) 2014 -", "%s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath,", "2018 
Apple Inc. and the Swift project authors Licensed under", "get_products(package_dir): src = os.path.join(build_dir, product) dest = os.path.join(bin_dir, product) #", "true if any of the actions in `action_names` should be", "Use local dependencies (i.e. checked out next sourcekit-lsp). if not", "(i.e. checked out next sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] =", "% package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # The", "build. No need to build if we are just testing", "given a swift workspace. \"\"\" from __future__ import print_function import", "action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): args =", "Testing %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests'", "package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='build', products=get_products(args.package_dir), sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config,", "['sk-stress-test', 'swift-evolve']: # Make the rpath to sourcekitd relative in", "as e: printerr('FAIL: Installing %s failed' % package_name) printerr('Executing: %s'", "verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies of %s", "import subprocess def printerr(message): print(message, file=sys.stderr) def main(argv_prefix = []):", "action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir,", "parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create a unified", "args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update: print(\"** Updating dependencies of", "'''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, 
'%s.xcodeproj' % package_name) args = [swift_exec,", "env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Testing %s failed'", "for the main swift repository's build-script.py that knows how to", "product after the other. for product in products: invoke_swift_single_product(package_dir, swift_exec,", "def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env,", "lib_dir]: if not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper", "build_dir, multiroot_data_file, config, env, verbose): args = [swift_exec, action, '--package-path',", "verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src,", "rpath, verbose=verbose) for rpath in rpaths_to_add: add_rpath(dest, rpath, verbose=verbose) def", "= [swift_exec, action, '--package-path', package_dir, '-c', config, '--build-path', build_dir] if", "for action_name in action_names: if should_run_action(action_name, selected_actions): return True return", "when building') parser.add_argument('build_actions', help=\"Extra actions to perform. 
Can be any", "package_name = os.path.basename(args.package_dir) env = dict(os.environ) # Use local dependencies", "to the stdlib in in the toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx']", "sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig')", "return [item for pair in zip([value] * len(list), list) for", "# FIXME: We ought to be able to query SwiftPM", "os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper for product in get_products(package_dir):", "\"\"\" from __future__ import print_function import argparse import sys import", "install action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir =", "if should_run_action(action_name, selected_actions): return True return False def should_run_action(action_name, selected_actions):", "following\", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed =", "Building %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd))", "failed') printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"test\", args.build_actions):", "build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Installing %s", "See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors ------------------------------------------------------------------------------", "for directory in [bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory) #", "in arg: return '\"%s\"' % arg.replace('\"', '\\\\\"') else: return arg", "subprocess.CalledProcessError as e: printerr('FAIL: Building %s failed' % package_name) printerr('Executing:", "and the Swift project authors Licensed under Apache License v2.0", "package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try: 
install_package(args.package_dir,", "subprocess.CalledProcessError as e: printerr('FAIL: Generating the Xcode project failed') printerr('Executing:", "package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve'] else: return", "subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return [item for", "env, verbose): args = [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir,", "rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath, verbose): cmd =", "e: printerr('FAIL: Building %s failed' % package_name) printerr('Executing: %s' %", "as e: printerr('FAIL: Generating the Xcode project failed') printerr('Executing: %s'", "return True return False def should_run_action(action_name, selected_actions): if action_name in", "e: printerr('FAIL: Updating dependencies of %s failed' % package_name) printerr('Executing:", "build_dir, env, verbose): args = [swift_exec, 'package', '--package-path', package_dir, '--build-path',", "multiroot_data_file, config, env, verbose): args = [swift_exec, action, '--package-path', package_dir,", "are building in a build-script environment so that # it", "env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath,", "projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to use when building this", "rpath to the stdlib in in the toolchain rpaths_to_add =", "in parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix' is required with the", "product) # Create a copy of the list since we", "check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for", "and sk-swiftc-wrapper for product in get_products(package_dir): src = 
os.path.join(build_dir, product)", "package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir,", "we modify it rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the rpath", "'usr', 'lib') # Convert package_dir to absolute path, relative to", "Testing %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd))", "generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL:", "swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose) def", "package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with open(config_path, 'w')", "= os.path.join(package_dir, '%s.xcodeproj' % package_name) args = [swift_exec, 'package', '--package-path',", "action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir',", "part of the Swift.org open source project Copyright (c) 2014", "e: printerr('FAIL: Installing %s failed' % package_name) printerr('Executing: %s' %", "parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to create", "parser.add_argument('build_actions', help=\"Extra actions to perform. Can be any number of", "Updating dependencies of %s failed' % package_name) printerr('Executing: %s' %", "the other. 
for product in products: invoke_swift_single_product(package_dir, swift_exec, action, product,", "See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for the list", "def escape_cmd_arg(arg): if '\"' in arg or ' ' in", "verbose=verbose) def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config,", "to query SwiftPM for this info. if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test',", "in `action_names` should be run. def should_run_any_action(action_names, selected_actions): for action_name", "'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed = parser.parse_args(args) if (\"install\" in", "to perform. Can be any number of the following\", choices=['all',", "{sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name) args", "verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Testing %s failed' %", "def get_products(package_dir): # FIXME: We ought to be able to", "cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return", "default='SourceKitStressTester') parser.add_argument('-v', '--verbose', action='store_true', help='log executed commands') parser.add_argument('--prefix', help='install path')", "and install the stress tester utilities given a swift workspace.", "$(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir, '%s.xcodeproj' % package_name) args =", "license information See https://swift.org/CONTRIBUTORS.txt for the list of Swift project", "% package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) # Returns", "workspace. 
\"\"\" from __future__ import print_function import argparse import sys", "printerr('FAIL: Generating the Xcode project failed') printerr('Executing: %s' % '", "rpath, verbose): cmd = ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose)", "def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose): args = [swift_exec, 'package',", "the main swift repository's build-script.py that knows how to build", "multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Testing", "swift_exec, build_dir, env, verbose): args = [swift_exec, 'package', '--package-path', package_dir,", "verbose): cmd = ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose) def", "https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for the list of", "return arg def get_products(package_dir): # FIXME: We ought to be", "utilities given a swift workspace. \"\"\" from __future__ import print_function", "generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir) config_path =", "if verbose: print(' '.join([escape_cmd_arg(arg) for arg in cmd])) return subprocess.check_call(cmd,", "%s **\" % package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift',", "sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Generating the", "swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose): #", "now, just build one product after the other. 
for product", "'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed = parser.parse_args(args) if", "os.path.join(parsed.toolchain, 'usr', 'lib') # Convert package_dir to absolute path, relative", "for product in get_products(package_dir): src = os.path.join(build_dir, product) dest =", "= {sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path =", "{sourcekit_searchpath} $(inherited) LD_RUNPATH_SEARCH_PATHS = {sourcekit_searchpath} $(inherited) '''.format(sourcekit_searchpath=sourcekit_searchpath)) xcodeproj_path = os.path.join(package_dir,", "os.path.join(install_dir, 'lib', 'swift', 'macosx') for directory in [bin_dir, lib_dir]: if", "product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec,", "any number of the following\", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'],", "perform. 
Can be any number of the following\", choices=['all', 'build',", "build_dir, 'update'] check_call(args, env=env, verbose=verbose) def invoke_swift(package_dir, swift_exec, action, products,", "package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except subprocess.CalledProcessError as", "# Create a copy of the list since we modify", "License v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt for license", "invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name], sourcekit_searchpath=sourcekit_searchpath, build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config,", "default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace to", "(\"install\" in parsed.build_actions or \"all\" in parsed.build_actions) and not parsed.prefix:", "dest)) check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose)", "printerr(message): print(message, file=sys.stderr) def main(argv_prefix = []): args = parse_args(argv_prefix", "build_dir=args.build_dir, multiroot_data_file=args.multiroot_data_file, config=args.config, env=env, verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL:", "verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Generating the Xcode project", "main(argv_prefix = []): args = parse_args(argv_prefix + sys.argv[1:]) run(args) def", "for %s **\" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env,", "rpaths_to_delete_for_this_product = list(rpaths_to_delete) # Add the rpath to the stdlib", "if (\"install\" in parsed.build_actions or \"all\" in parsed.build_actions) 
and not", "invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath, build_dir, multiroot_data_file, config, env, verbose):", "% package_name) args = [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides',", "to root of repo. repo_path = os.path.dirname(__file__) parsed.package_dir = os.path.realpath(", "except subprocess.CalledProcessError as e: printerr('FAIL: Updating dependencies of %s failed'", "should_run_action(action_name, selected_actions): if action_name in selected_actions: return True elif \"all\"", "os.path.join(parsed.package_dir, parsed.build_dir) return parsed def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir)", "rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath in rpaths_to_add:", "parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an Xcode workspace", "parser.add_argument('--no-local-deps', action='store_true', help='use normal remote dependencies when building') parser.add_argument('build_actions', help=\"Extra", "with other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to use when", "need to build if we are just testing if should_run_any_action(['build',", "sourcekit-lsp). if not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update: print(\"**", "of multiple # targets simultaneously. 
For now, just build one", "No need to build if we are just testing if", "selected_actions): if action_name in selected_actions: return True elif \"all\" in", "SwiftSyntax that we are building in a build-script environment so", "config, env, verbose): # Until rdar://53881101 is implemented, we cannot", "subprocess.CalledProcessError as e: printerr('FAIL: Testing %s failed' % package_name) printerr('Executing:", "def invoke_swift(package_dir, swift_exec, action, products, sourcekit_searchpath, build_dir, multiroot_data_file, config, env,", "swift repository's build-script.py that knows how to build and install", "def main(argv_prefix = []): args = parse_args(argv_prefix + sys.argv[1:]) run(args)", "product) dest = os.path.join(bin_dir, product) # Create a copy of", "os, platform import subprocess def printerr(message): print(message, file=sys.stderr) def main(argv_prefix", "help='install path') parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path to an", "try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as", "------------------------------------------------------------------------------ This is a helper script for the main swift", "of the following\", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build'])", "# Returns true if any of the actions in `action_names`", "%s **\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose)", "of %s **\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env,", "commands') parser.add_argument('--prefix', help='install path') 
parser.add_argument('--config', default='debug') parser.add_argument('--build-dir', default='.build') parser.add_argument('--multiroot-data-file', help='Path", "Xcode project failed') printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) if", "sk-stress-test and sk-swiftc-wrapper for product in get_products(package_dir): src = os.path.join(build_dir,", "+ sys.argv[1:]) run(args) def parse_args(args): parser = argparse.ArgumentParser(prog='BUILD-SCRIPT-HELPER.PY') parser.add_argument('--package-dir', default='SourceKitStressTester')", "number of the following\", choices=['all', 'build', 'test', 'install', 'generate-xcodeproj'], nargs=\"*\",", "its own build. No need to build if we are", "rpath, verbose=verbose) def generate_xcodeproj(package_dir, swift_exec, sourcekit_searchpath, env, verbose): package_name =", "e: printerr('FAIL: Testing %s failed' % package_name) printerr('Executing: %s' %", "check_call(args, env=env, verbose=verbose) def add_rpath(binary, rpath, verbose): cmd = ['install_name_tool',", "% ' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"** Installing %s", "in a build-script environment so that # it does not", "= ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']: # Make the", "**kwargs) def interleave(value, list): return [item for pair in zip([value]", "= os.path.join(bin_dir, product) # Create a copy of the list", "check_call(cmd, verbose=verbose) def check_call(cmd, verbose, env=os.environ, **kwargs): if verbose: print('", "= os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath,", "multiroot_data_file, config, env, verbose) def invoke_swift_single_product(package_dir, swift_exec, action, product, sourcekit_searchpath,", "the Swift.org open source project Copyright (c) 2014 - 2018", "parser.add_argument('--toolchain', required=True, help='the 
toolchain to use when building this package')", "main swift repository's build-script.py that knows how to build and", "toolchain rpaths_to_add = ['@executable_path/../lib/swift/macosx'] if product in ['sk-stress-test', 'swift-evolve']: #", "os.path.join(build_dir, product) dest = os.path.join(bin_dir, product) # Create a copy", "env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir", "argparse import sys import os, platform import subprocess def printerr(message):", "with open(config_path, 'w') as config_file: config_file.write(''' SYSTEM_FRAMEWORK_SEARCH_PATHS = {sourcekit_searchpath} $(inherited)", "def run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env = dict(os.environ) #", "Licensed under Apache License v2.0 with Runtime Library Exception See", "print('installing %s to %s' % (os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for", "not args.no_local_deps: env['SWIFTCI_USE_LOCAL_DEPS'] = \"1\" if args.update: print(\"** Updating dependencies", "def printerr(message): print(message, file=sys.stderr) def main(argv_prefix = []): args =", "Tell SwiftSyntax that we are building in a build-script environment", "file=sys.stderr) def main(argv_prefix = []): args = parse_args(argv_prefix + sys.argv[1:])", "rpaths_to_delete=[stdlib_dir], verbose=args.verbose) except subprocess.CalledProcessError as e: printerr('FAIL: Installing %s failed'", "in parsed.build_actions or \"all\" in parsed.build_actions) and not parsed.prefix: ArgumentParser.error(\"'--prefix'", "src, dest] print('installing %s to %s' % (os.path.basename(src), dest)) check_call(copy_cmd,", "to create a unified build of SwiftSyntax with other projects.')", "'--package-path', package_dir, '-c', config, '--build-path', build_dir] if multiroot_data_file: args.extend(['--multiroot-data-file', multiroot_data_file])", "sys import os, platform import 
subprocess def printerr(message): print(message, file=sys.stderr)", "verbose): args = [swift_exec, 'package', '--package-path', package_dir, '--build-path', build_dir, 'update']", "info. if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper'] elif package_dir.endswith(\"/SwiftEvolve\"): return ['swift-evolve']", "that knows how to build and install the stress tester", "'usr', 'lib', 'swift', 'macosx') try: install_package(args.package_dir, install_dir=args.prefix, sourcekit_searchpath=sourcekit_searchpath, build_dir=output_dir, rpaths_to_delete=[stdlib_dir],", "os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper for product in", "'lib') # Convert package_dir to absolute path, relative to root", "repository's build-script.py that knows how to build and install the", "Generating the Xcode project failed') printerr('Executing: %s' % ' '.join(e.cmd))", "**\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec, build_dir=args.build_dir, env=env, verbose=args.verbose) except", "rpath, verbose): cmd = ['install_name_tool', '-delete_rpath', rpath, binary] check_call(cmd, verbose=verbose)", "env, verbose): package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir, 'Config.xcconfig') with", "(os.path.basename(src), dest)) check_call(copy_cmd, verbose=verbose) for rpath in rpaths_to_delete: remove_rpath(dest, rpath,", "= []): args = parse_args(argv_prefix + sys.argv[1:]) run(args) def parse_args(args):", "in [bin_dir, lib_dir]: if not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test", "% package_name) stdlib_dir = os.path.join(args.toolchain, 'usr', 'lib', 'swift', 'macosx') try:", "' '.join(e.cmd)) sys.exit(1) output_dir = os.path.realpath(os.path.join(args.build_dir, args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions):", "project authors 
------------------------------------------------------------------------------ This is a helper script for the", "args.build_actions): print(\"** Testing %s **\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec,", "a build of multiple # targets simultaneously. For now, just", "is required with the install action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr',", "package_dir to absolute path, relative to root of repo. repo_path", "% package_name) printerr('Executing: %s' % ' '.join(e.cmd)) sys.exit(1) output_dir =", "True else: return False def update_swiftpm_dependencies(package_dir, swift_exec, build_dir, env, verbose):", "is a helper script for the main swift repository's build-script.py", "stderr=subprocess.STDOUT, **kwargs) def interleave(value, list): return [item for pair in", "= os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir = os.path.join(parsed.toolchain, 'usr', 'lib')", "Apache License v2.0 with Runtime Library Exception See https://swift.org/LICENSE.txt for", "list of Swift project authors ------------------------------------------------------------------------------ This is a helper", "help='the toolchain to use when building this package') parser.add_argument('--update', action='store_true',", "= dict(os.environ) # Use local dependencies (i.e. checked out next", "' '.join(e.cmd)) sys.exit(1) if should_run_action(\"install\", args.build_actions): print(\"** Installing %s **\"", "**\" % package_name) try: invoke_swift(package_dir=args.package_dir, swift_exec=args.swift_exec, action='test', products=['%sPackageTests' % package_name],", "swift_exec, sourcekit_searchpath, env, verbose): package_name = os.path.basename(package_dir) config_path = os.path.join(package_dir,", "Updating dependencies of %s **\" % package_name) try: update_swiftpm_dependencies(package_dir=args.package_dir, swift_exec=args.swift_exec,", "build one product after the other. 
for product in products:", "def check_call(cmd, verbose, env=os.environ, **kwargs): if verbose: print(' '.join([escape_cmd_arg(arg) for", "= ['install_name_tool', '-add_rpath', rpath, binary] check_call(cmd, verbose=verbose) def remove_rpath(binary, rpath,", "os.path.realpath( os.path.join(repo_path, parsed.package_dir)) # Convert build_dir to absolute path, relative", "install(src, dest, rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add,", "'test', 'install', 'generate-xcodeproj'], nargs=\"*\", default=['build']) parsed = parser.parse_args(args) if (\"install\"", "rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync', '-a', src, dest] print('installing %s to", "should be run. def should_run_any_action(action_names, selected_actions): for action_name in action_names:", "file is part of the Swift.org open source project Copyright", "(c) 2014 - 2018 Apple Inc. and the Swift project", "The test action creates its own build. No need to", "how to build and install the stress tester utilities given", "print(message, file=sys.stderr) def main(argv_prefix = []): args = parse_args(argv_prefix +", "import argparse import sys import os, platform import subprocess def", "args.update: print(\"** Updating dependencies of %s **\" % package_name) try:", "Installing %s failed' % package_name) printerr('Executing: %s' % ' '.join(e.cmd))", "are just testing if should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building %s", "the install action\") parsed.swift_exec = os.path.join(parsed.toolchain, 'usr', 'bin', 'swift') parsed.sourcekitd_dir", "interleave(value, list): return [item for pair in zip([value] * len(list),", "= [swift_exec, 'package', '--package-path', package_dir, 'generate-xcodeproj', '--xcconfig-overrides', config_path, '--output', xcodeproj_path]", "query SwiftPM for this info. 
if package_dir.endswith(\"/SourceKitStressTester\"): return ['sk-stress-test', 'sk-swiftc-wrapper']", "python \"\"\" This source file is part of the Swift.org", "project authors Licensed under Apache License v2.0 with Runtime Library", "information See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors", "for rpath in rpaths_to_delete: remove_rpath(dest, rpath, verbose=verbose) for rpath in", "not os.path.exists(directory): os.makedirs(directory) # Install sk-stress-test and sk-swiftc-wrapper for product", "\"all\" in selected_actions: return True else: return False def update_swiftpm_dependencies(package_dir,", "run(args): sourcekit_searchpath=args.sourcekitd_dir package_name = os.path.basename(args.package_dir) env = dict(os.environ) # Use", "args.config)) if should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode project for %s", "env['SWIFT_BUILD_SCRIPT_ENVIRONMENT'] = '1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose) def", "Exception See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt for the", "install_dir, sourcekit_searchpath, build_dir, rpaths_to_delete, verbose): bin_dir = os.path.join(install_dir, 'bin') lib_dir", "for item in pair] def escape_cmd_arg(arg): if '\"' in arg", "# Until rdar://53881101 is implemented, we cannot request a build", "[item for pair in zip([value] * len(list), list) for item", "the stress tester utilities given a swift workspace. \"\"\" from", "swift workspace. 
\"\"\" from __future__ import print_function import argparse import", "# Tell SwiftSyntax that we are building in a build-script", "for arg in cmd])) return subprocess.check_call(cmd, env=env, stderr=subprocess.STDOUT, **kwargs) def", "other projects.') parser.add_argument('--toolchain', required=True, help='the toolchain to use when building", "= sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir, sourcekit_searchpath, build_dir,", "should_run_any_action(action_names, selected_actions): for action_name in action_names: if should_run_action(action_name, selected_actions): return", "Runtime Library Exception See https://swift.org/LICENSE.txt for license information See https://swift.org/CONTRIBUTORS.txt", "should_run_action(\"generate-xcodeproj\", args.build_actions): print(\"** Generating Xcode project for %s **\" %", "project Copyright (c) 2014 - 2018 Apple Inc. and the", "%s **\" % package_name) try: generate_xcodeproj(args.package_dir, swift_exec=args.swift_exec, sourcekit_searchpath=sourcekit_searchpath, env=env, verbose=args.verbose)", "else: args.extend(['--product', product]) # Tell SwiftSyntax that we are building", "default=['build']) parsed = parser.parse_args(args) if (\"install\" in parsed.build_actions or \"all\"", "testing if should_run_any_action(['build', 'install'], args.build_actions): print(\"** Building %s **\" %", "'1' env['SWIFT_STRESS_TESTER_SOURCEKIT_SEARCHPATH'] = sourcekit_searchpath check_call(args, env=env, verbose=verbose) def install_package(package_dir, install_dir,", "in pair] def escape_cmd_arg(arg): if '\"' in arg or '", "rpaths_to_delete=rpaths_to_delete_for_this_product, rpaths_to_add=rpaths_to_add, verbose=verbose) def install(src, dest, rpaths_to_delete, rpaths_to_add, verbose): copy_cmd=['rsync'," ]
[ "works.\"\"\" data = { \"groups\": { \"1\": { \"id\": \"Light", "Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service", "config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service turn on scene await opp.services.async_call(", "patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const", "Service turn on scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"},", "# Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) #", "opp.states.get(\"scene.light_group_scene\") # Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" )", "service calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service turn", "scenes can be loaded without scenes being available.\"\"\" await setup_deconz_integration(opp,", "mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service turn on scene", "await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2]", "scene platform tests.\"\"\" from unittest.mock import patch from openpeerpower.components.scene import", "{\"all_on\": False, \"any_on\": True}, \"action\": {}, \"scenes\": [{\"id\": \"1\", \"name\":", "{ \"1\": { \"id\": \"Light group id\", \"name\": \"Light group\",", "\"/groups/1/scenes/1/recall\" ) # Service turn on scene await opp.services.async_call( SCENE_DOMAIN,", "calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service turn on", "can be loaded without scenes being available.\"\"\" await setup_deconz_integration(opp, aioclient_mock)", "being available.\"\"\" await 
setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0 async", "platform tests.\"\"\" from unittest.mock import patch from openpeerpower.components.scene import DOMAIN", "aioclient_mock) assert len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\") # Verify service", "opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] ==", "await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\") #", "async def test_no_scenes(opp, aioclient_mock): \"\"\"Test that scenes can be loaded", "turn on scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True,", "setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0 async def test_scenes(opp, aioclient_mock):", "from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def", "available.\"\"\" await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0 async def", "len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\") # Verify service calls mock_deconz_put_request(", "\"LightGroup\", \"state\": {\"all_on\": False, \"any_on\": True}, \"action\": {}, \"scenes\": [{\"id\":", "\"\"\"Test that scenes can be loaded without scenes being available.\"\"\"", "\"1\", \"name\": \"Scene\"}], \"lights\": [], } } } with patch.dict(DECONZ_WEB_REQUEST,", "\"lights\": [], } } } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry =", "\"type\": \"LightGroup\", \"state\": {\"all_on\": False, \"any_on\": True}, \"action\": {}, \"scenes\":", "aioclient_mock): \"\"\"Test that scenes works.\"\"\" data = { \"groups\": {", "ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, 
mock_deconz_put_request, setup_deconz_integration, ) async", ") async def test_no_scenes(opp, aioclient_mock): \"\"\"Test that scenes can be", "without scenes being available.\"\"\" await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) ==", "= { \"groups\": { \"1\": { \"id\": \"Light group id\",", "unittest.mock import patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON", "scenes works.\"\"\" data = { \"groups\": { \"1\": { \"id\":", "{ \"id\": \"Light group id\", \"name\": \"Light group\", \"type\": \"LightGroup\",", "\"any_on\": True}, \"action\": {}, \"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\":", "} } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp, aioclient_mock)", "} } } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp,", "aioclient_mock): \"\"\"Test that scenes can be loaded without scenes being", "\"Light group\", \"type\": \"LightGroup\", \"state\": {\"all_on\": False, \"any_on\": True}, \"action\":", "<filename>tests/components/deconz/test_scene.py \"\"\"deCONZ scene platform tests.\"\"\" from unittest.mock import patch from", "as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import", "with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all())", "[], } } } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await", "\"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id) assert", "import patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from", "loaded without scenes being available.\"\"\" await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all())", "\"id\": \"Light group id\", 
\"name\": \"Light group\", \"type\": \"LightGroup\", \"state\":", "from unittest.mock import patch from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN,", "def test_no_scenes(opp, aioclient_mock): \"\"\"Test that scenes can be loaded without", "that scenes works.\"\"\" data = { \"groups\": { \"1\": {", "\"1\": { \"id\": \"Light group id\", \"name\": \"Light group\", \"type\":", "from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request,", "SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {} await", "blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id) assert len(opp.states.async_all())", "= await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\")", "DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock): \"\"\"Test that", ") # Service turn on scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON,", "data = { \"groups\": { \"1\": { \"id\": \"Light group", "{ \"groups\": { \"1\": { \"id\": \"Light group id\", \"name\":", "# Service turn on scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID:", "openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration,", ".test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp,", "\"action\": {}, \"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\": [], }", "assert len(opp.states.async_all()) == 0 async def test_scenes(opp, aioclient_mock): \"\"\"Test that", "True}, \"action\": {}, \"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\": [],", "test_no_scenes(opp, 
aioclient_mock): \"\"\"Test that scenes can be loaded without scenes", "test_scenes(opp, aioclient_mock): \"\"\"Test that scenes works.\"\"\" data = { \"groups\":", "async def test_scenes(opp, aioclient_mock): \"\"\"Test that scenes works.\"\"\" data =", "setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock): \"\"\"Test that scenes can", "SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST,", "\"Light group id\", \"name\": \"Light group\", \"type\": \"LightGroup\", \"state\": {\"all_on\":", "patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) ==", "config_entry = await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1 assert", "group id\", \"name\": \"Light group\", \"type\": \"LightGroup\", \"state\": {\"all_on\": False,", "len(opp.states.async_all()) == 0 async def test_scenes(opp, aioclient_mock): \"\"\"Test that scenes", "{ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id)", "DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway", ") assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id) assert len(opp.states.async_all()) ==", "id\", \"name\": \"Light group\", \"type\": \"LightGroup\", \"state\": {\"all_on\": False, \"any_on\":", "openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID", "SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from .test_gateway import (", "assert aioclient_mock.mock_calls[1][2] == {} await opp.config_entries.async_unload(config_entry.entry_id) assert len(opp.states.async_all()) == 0", "SCENE_DOMAIN, SERVICE_TURN_ON, 
{ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert aioclient_mock.mock_calls[1][2] == {}", "\"Scene\"}], \"lights\": [], } } } with patch.dict(DECONZ_WEB_REQUEST, data): config_entry", "aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\" ) # Service turn on scene await", "0 async def test_scenes(opp, aioclient_mock): \"\"\"Test that scenes works.\"\"\" data", "scene await opp.services.async_call( SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, ) assert", "\"\"\"deCONZ scene platform tests.\"\"\" from unittest.mock import patch from openpeerpower.components.scene", "\"name\": \"Scene\"}], \"lights\": [], } } } with patch.dict(DECONZ_WEB_REQUEST, data):", "\"name\": \"Light group\", \"type\": \"LightGroup\", \"state\": {\"all_on\": False, \"any_on\": True},", "} with patch.dict(DECONZ_WEB_REQUEST, data): config_entry = await setup_deconz_integration(opp, aioclient_mock) assert", "def test_scenes(opp, aioclient_mock): \"\"\"Test that scenes works.\"\"\" data = {", "assert opp.states.get(\"scene.light_group_scene\") # Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data, \"/groups/1/scenes/1/recall\"", "[{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\": [], } } } with", "setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\") # Verify", "mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock): \"\"\"Test that scenes", "\"\"\"Test that scenes works.\"\"\" data = { \"groups\": { \"1\":", "== 0 async def test_scenes(opp, aioclient_mock): \"\"\"Test that scenes works.\"\"\"", "assert len(opp.states.async_all()) == 1 assert opp.states.get(\"scene.light_group_scene\") # Verify service calls", "data): config_entry = await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 1", "on scene await opp.services.async_call( 
SCENE_DOMAIN, SERVICE_TURN_ON, {ATTR_ENTITY_ID: \"scene.light_group_scene\"}, blocking=True, )", "( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock): \"\"\"Test", "aioclient_mock) assert len(opp.states.async_all()) == 0 async def test_scenes(opp, aioclient_mock): \"\"\"Test", "False, \"any_on\": True}, \"action\": {}, \"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}],", "be loaded without scenes being available.\"\"\" await setup_deconz_integration(opp, aioclient_mock) assert", "tests.\"\"\" from unittest.mock import patch from openpeerpower.components.scene import DOMAIN as", "1 assert opp.states.get(\"scene.light_group_scene\") # Verify service calls mock_deconz_put_request( aioclient_mock, config_entry.data,", "import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, ) async def test_no_scenes(opp, aioclient_mock):", "\"groups\": { \"1\": { \"id\": \"Light group id\", \"name\": \"Light", "that scenes can be loaded without scenes being available.\"\"\" await", "await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0 async def test_scenes(opp,", "\"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\": [], } } }", "== 1 assert opp.states.get(\"scene.light_group_scene\") # Verify service calls mock_deconz_put_request( aioclient_mock,", "{}, \"scenes\": [{\"id\": \"1\", \"name\": \"Scene\"}], \"lights\": [], } }", "group\", \"type\": \"LightGroup\", \"state\": {\"all_on\": False, \"any_on\": True}, \"action\": {},", "import ATTR_ENTITY_ID from .test_gateway import ( DECONZ_WEB_REQUEST, mock_deconz_put_request, setup_deconz_integration, )", "from openpeerpower.components.scene import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import", "scenes being available.\"\"\" await setup_deconz_integration(opp, aioclient_mock) assert len(opp.states.async_all()) == 0", "\"state\": {\"all_on\": False, \"any_on\": True}, 
\"action\": {}, \"scenes\": [{\"id\": \"1\",", "import DOMAIN as SCENE_DOMAIN, SERVICE_TURN_ON from openpeerpower.const import ATTR_ENTITY_ID from" ]
[ "= str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH", "some_option = ['foo', 'bar'] Will return: ['foo', 'bar'] ''' import", "class API_SERVER: section = 'api.server' BACKEND = config.get(section, 'backend', fallback='gevent')", "-> None: '''Safe means that it won't override existing configuration'''", "to a valid python list. Fallback value is returned when", "files exist''' def __init__(self): # 1. Check if all config", "# Use in-memory (before: sqlite:///test_database.sqlite) class API: section = 'api'", "= config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS", "'bar'] Will return: ['foo', 'bar'] ''' import ast try: raw_arguments", "(TensorHive tries to load these by default) config_dir = PosixPath.home()", "Detected missing default config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All configs", "files # (TensorHive tries to load these by default) config_dir", "config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH:", "wrong (e.g. 
option or value not present) Example .ini file,", "default {} settings from config.py'.format(displayed_title)) return config ConfigInitilizer() config =", "''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH: section = 'ssh' HOSTS_CONFIG_FILE =", "= full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2) class", "SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from datetime import", "value not present) Example .ini file, function called with arguments:", "fallback=False) class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general'", "yaml.safe_load(file) class APP_SERVER: section = 'web_app.server' BACKEND = config.get(section, 'backend',", "str) -> Optional[Dict]: # type: ignore '''Parses [proxy_tunneling] section''' config", "fallback=2.0) class PROTECTION_SERVICE: section = 'protection_service' ENABLED = config.getboolean(section, 'enabled',", "ignore ''' Parses value for option from string to a", "str) -> Dict: # type: ignore '''Parses sections containing hostnames'''", "os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘] Unable", "Reading {} config from {}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration file", "config.getboolean(section, 'enabled', fallback=False): return { 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section,", "declared''' if not os.getenv(name): msg = cleandoc( ''' {env} -", "dst)) class ConfigLoader: @staticmethod def load(path, displayed_title=''): import configparser config", "= hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class 
DB: section = 'database'", "uri_for_path(path: str) -> str: # type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI", "'''Makes sure that env variable is declared''' if not os.getenv(name):", "recreate_default_configuration_files(self) -> None: try: # 1. Create directory for stroing", "'debug', fallback=False) class MONITORING_SERVICE: section = 'monitoring_service' ENABLED = config.getboolean(section,", "inspect import cleandoc import shutil import tensorhive import os import", "uppercase class atributes (class must be defined first) Example usage:", "fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=5000) WORKERS = config.getint(section, 'workers',", "= 'web_app.server' BACKEND = config.get(section, 'backend', fallback='gunicorn') HOST = config.get(section,", "TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0)", "for section in hosts_config.sections(): # We want to parse only", "= config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL", "None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section", "config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION =", "fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir',", "to load these by default) config_dir = PosixPath.home() / '.config/TensorHive'", "cls.__dict__.items(): if key.isupper(): print('{} = {}'.format(key, value)) def check_env_var(name: str):", "from {}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration file not found ({})'.format(full_path))", "ConfigLoader: @staticmethod def load(path, 
displayed_title=''): import configparser config = configparser.ConfigParser(strict=False)", "/ 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir", "os.getenv(name): msg = cleandoc( ''' {env} - undeclared environment variable!", "'general' INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval',", "{}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied {} to {}'.format(src, dst)) class", "str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH =", "to get file templates from # (Clone file when it's", "= ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing [auth] config", "= config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES", "AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section =", "if section == 'proxy_tunneling': continue hostname = section result[hostname] =", "'bar'] ''' import ast try: raw_arguments = config.get('auth', option) parsed_arguments", "-> Dict: # type: ignore '''Parses sections containing hostnames''' hosts_config", "import tensorhive import os import logging log = logging.getLogger(__name__) class", "'proxy_tunneling' # Check if section is present and if yes,", "ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI =", "config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml", "Change 
config files permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH,", "= cleandoc( ''' {env} - undeclared environment variable! Try this:", "ignore '''Parses sections containing hostnames''' hosts_config = ConfigLoader.load(path, displayed_title='hosts') result", "/ 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir", "= config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL", "ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0)", "directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH", "self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. Change config files permission rw_owner_only =", "and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•]", "result[hostname] = { 'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22)", "log.warning('[✘] Configuration file not found ({})'.format(full_path)) log.info('Using default {} settings", "fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix', fallback='api') SPEC_FILE = config.get(section, 'spec_file',", "when anything goes wrong (e.g. 
option or value not present)", "PosixPath(dst).exists(): log.info('Skipping, file already exists: {}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied", "section == 'proxy_tunneling': continue hostname = section result[hostname] = {", "= config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT", "str(config_dir / 'mailbot_config.ini') # Where to get file templates from", "UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True)", "API_SERVER: section = 'api.server' BACKEND = config.get(section, 'backend', fallback='gevent') HOST", "= mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None) section", "= config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER", "config.get(section, 'backend', fallback='gevent') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT =", "directory for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. 
Clone", "existing configuration''' if PosixPath(dst).exists(): log.info('Skipping, file already exists: {}'.format(dst)) else:", "import Dict, Optional, Any, List from inspect import cleandoc import", "defined first) Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value", "from # (Clone file when it's not found in config", "'.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir /", "0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘]", "{}'.format(key, value)) def check_env_var(name: str): '''Makes sure that env variable", "PORT = config.getint(section, 'port', fallback=1111) DEBUG = config.getboolean(section, 'debug', fallback=False)", "= 'protection_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section,", "mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL =", "config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) -> Dict: # type:", "'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section,", "def config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore", "Check if all config files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and", "'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini') # Where to get", "HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=1111)", "'auth' def config_get_parsed(option: str, fallback: Any) -> List[str]: # type:", "'template/admin' 
ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class", "'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days', fallback=1)), 'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location',", "uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before:", "def uri_for_path(path: str) -> str: # type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser())", "{ 'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22) } return", "config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL =", "sure that all default config files exist''' def __init__(self): #", "UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path))", "return fallback FLASK_JWT = { 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED':", "str: # type: ignore return str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled',", "when it's not found in config directory) tensorhive_package_dir = PosixPath(__file__).parent", "APP_SERVER: section = 'web_app.server' BACKEND = config.get(section, 'backend', fallback='gunicorn') HOST", "= 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')", "'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': 
config_get_parsed('jwt_blacklist_token_checks',", "LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service'", "{ 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS':", "(before: sqlite:///test_database.sqlite) class API: section = 'api' TITLE = config.get(section,", "if config.read(str(full_path)): log.info('[•] Reading {} config from {}'.format(displayed_title, full_path)) else:", "# 3. Change config files permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH,", "(class must be defined first) Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__))", "# Check if section is present and if yes, check", "hosts_config.sections(): # We want to parse only sections which describe", "ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)", "to parse only sections which describe target hosts if section", "'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER = mailbot_config.get(section,", "fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin',", "class SSH: section = 'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH)", "templates from # (Clone file when it's not found in", "= str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini') #", "config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix', fallback='api') SPEC_FILE =", "PORT = config.getint(section, 'port', fallback=5000) 
WORKERS = config.getint(section, 'workers', fallback=4)", "shutil import tensorhive import os import logging log = logging.getLogger(__name__)", "config.get(section, 'url_prefix', fallback='api') SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION =", "= config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class", "fallback='api') SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location',", "= 'api.server' BACKEND = config.get(section, 'backend', fallback='gevent') HOST = config.get(section,", "= config.getboolean(section, 'debug', fallback=False) class MONITORING_SERVICE: section = 'monitoring_service' ENABLED", "rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘] Unable to recreate configuration", "else: return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class", "parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing [auth]", "fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) -> Dict: # type: ignore '''Parses", "value in cls.__dict__.items(): if key.isupper(): print('{} = {}'.format(key, value)) def", "fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1) KEY_FILE = config.get(section, 'key_file',", "section result[hostname] = { 'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port',", "Dict: # type: ignore '''Parses sections containing hostnames''' hosts_config =", "fallback=2) class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled',", "log.info('[•] All configs 
already exist, skipping...') def recreate_default_configuration_files(self) -> None:", "str, fallback: Any) -> List[str]: # type: ignore ''' Parses", "-> None: try: # 1. Create directory for stroing config", "import PosixPath import configparser from typing import Dict, Optional, Any,", "undeclared environment variable! Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1])", "'proxy_port', fallback=22) } else: return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY", "'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir /", "hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22) } return result def", "dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. Change config files permission rw_owner_only = 0o600", "sections which describe target hosts if section == 'proxy_tunneling': continue", "fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\",", "'database' default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) -> str: #", "def display_config(cls): ''' Displays all uppercase class atributes (class must", "Create directory for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2.", "this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH: section =", "PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•] Detected missing default config file(s),", "# 2. 
Clone templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH)", "{}'.format(src, dst)) class ConfigLoader: @staticmethod def load(path, displayed_title=''): import configparser", "PROTECTION_SERVICE: section = 'protection_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL", "to {}'.format(src, dst)) class ConfigLoader: @staticmethod def load(path, displayed_title=''): import", "{} settings from config.py'.format(displayed_title)) return config ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH,", "section = 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section,", "= config.getint(section, 'number_of_retries', fallback=1) KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def", "== 'proxy_tunneling': continue hostname = section result[hostname] = { 'user':", "get file templates from # (Clone file when it's not", "permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only)", "ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling' # Check if section is", "load these by default) config_dir = PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH", "all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if", "key, value in cls.__dict__.items(): if key.isupper(): print('{} = {}'.format(key, value))", "config.py'.format(displayed_title)) return config ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls):", "= config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL 
= config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY", "= mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587) section", "class APP_SERVER: section = 'web_app.server' BACKEND = config.get(section, 'backend', fallback='gunicorn')", "# Where to copy files # (TensorHive tries to load", "files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. Clone templates safely from `tensorhive`", "= config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot')", "option) parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing", "def load(path, displayed_title=''): import configparser config = configparser.ConfigParser(strict=False) full_path =", "= config.getint(section, 'port', fallback=5000) WORKERS = config.getint(section, 'workers', fallback=4) LOG_LEVEL", "files.') def safe_copy(self, src: str, dst: str) -> None: '''Safe", "want to parse only sections which describe target hosts if", "fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT: mailbot_config =", "= config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False) class", "fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email',", "SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None)", "ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None) section = 'smtp' SMTP_LOGIN =", "config from {}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration file not found", 
"mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path = '~/.config/TensorHive/logs/'", "1. Check if all config files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists()", "= mailbot_config.get(section, 'admin_email', fallback=None) section = 'smtp' SMTP_LOGIN = mailbot_config.get(section,", "section = 'proxy_tunneling' # Check if section is present and", "list. Fallback value is returned when anything goes wrong (e.g.", "# 1. Check if all config files exist all_exist =", "config.has_section(section) and config.getboolean(section, 'enabled', fallback=False): return { 'proxy_host': config.get(section, 'proxy_host'),", "fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section, 'timeout',", "''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in cls.__dict__.items(): if key.isupper(): print('{}", "List from inspect import cleandoc import shutil import tensorhive import", "result def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore '''Parses", "ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0)", "safe_copy(self, src: str, dst: str) -> None: '''Safe means that", "if tunneling is enabled if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False):", "= config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from datetime import timedelta", "default) config_dir = PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir /", "/ 'controllers/responses.yml') with open(respones_file_path, 'r') as file: RESPONSES = yaml.safe_load(file)", "'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=1111) DEBUG = config.getboolean(section,", ".ini file, function called with arguments: 
option='some_option', fallback=None [some_section] some_option", "= config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) -> Dict: #", "Fallback value is returned when anything goes wrong (e.g. option", "= 'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section,", "fallback=1) KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) ->", "3. Change config files permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only)", "= ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general' INTERVAL = mailbot_config.getfloat(section, 'interval',", "typing import Dict, Optional, Any, List from inspect import cleandoc", "URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix', fallback='api')", "= mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN", "\"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from datetime import timedelta section =", "fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval',", "'proxy_tunneling': continue hostname = section result[hostname] = { 'user': hosts_config.get(hostname,", "and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•] Detected missing default", "import shutil import tensorhive import os import logging log =", "variable! 
Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH:", "'port', fallback=22) } return result def proxy_config_to_dict(path: str) -> Optional[Dict]:", "[auth] config section failed for option \"{}\", using fallback value:", "key.isupper(): print('{} = {}'.format(key, value)) def check_env_var(name: str): '''Makes sure", "rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except", "if not os.getenv(name): msg = cleandoc( ''' {env} - undeclared", "enabled if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False): return { 'proxy_host':", "{}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration file not found ({})'.format(full_path)) log.info('Using", "won't override existing configuration''' if PosixPath(dst).exists(): log.info('Skipping, file already exists:", "import ast try: raw_arguments = config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments)", "str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini') # Where", "'controllers/responses.yml') with open(respones_file_path, 'r') as file: RESPONSES = yaml.safe_load(file) class", "config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER =", "= { 'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22) }", "continue hostname = section result[hostname] = { 'user': hosts_config.get(hostname, 'user'),", "return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://'", "= 'api' TITLE = config.get(section, 'title', fallback='TensorHive API') 
URL_HOSTNAME =", "yes, check if tunneling is enabled if config.has_section(section) and config.getboolean(section,", "class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled', fallback=True)", "fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)),", "config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. Clone templates safely from", "MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini')", "default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) -> str: # type:", "config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section", "NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False)", "= { 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True),", "ValueError): log.warning('Parsing [auth] config section failed for option \"{}\", using", "fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES':", "= config.get(section, 'backend', fallback='gevent') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT", "def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore '''Parses [proxy_tunneling]", "-> str: # type: ignore return str(PosixPath(path).expanduser()) ENABLED = 
config.getboolean(section,", "all config files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists()", "= '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) -> str: # type: ignore", "= config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR", "default config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All configs already exist,", "config.getint(section, 'number_of_retries', fallback=1) KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path:", "string to a valid python list. Fallback value is returned", "config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from datetime import timedelta section", "'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section,", "if key.isupper(): print('{} = {}'.format(key, value)) def check_env_var(name: str): '''Makes", "Unable to recreate configuration files.') def safe_copy(self, src: str, dst:", "environment variable! 
Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class", "section = 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section,", "and config.getboolean(section, 'enabled', fallback=False): return { 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user':", "INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section = 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section,", "= PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir", "= config.getint(section, 'workers', fallback=4) LOG_LEVEL = config.get(section, 'loglevel', fallback='warning') class", "atributes (class must be defined first) Example usage: display_config(API_SERVER) '''", "PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section = 'database' default_path =", "All configs already exist, skipping...') def recreate_default_configuration_files(self) -> None: try:", "os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘] Unable to recreate configuration files.')", "'notify_admin', fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None) section = 'smtp'", "'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section,", "config section failed for option \"{}\", using fallback value: {}'.format(", "timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days', fallback=1)), 'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers'])", "proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section = 'database' default_path = 
'~/.config/TensorHive/database.sqlite' def", "if not all_exist: log.warning('[•] Detected missing default config file(s), recreating...')", "({})'.format(full_path)) log.info('Using default {} settings from config.py'.format(displayed_title)) return config ConfigInitilizer()", "with arguments: option='some_option', fallback=None [some_section] some_option = ['foo', 'bar'] Will", "Exception: log.error('[✘] Unable to recreate configuration files.') def safe_copy(self, src:", "shutil.copy(src, dst) log.info('Copied {} to {}'.format(src, dst)) class ConfigLoader: @staticmethod", "fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action',", "/ 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini') # Where to", "in cls.__dict__.items(): if key.isupper(): print('{} = {}'.format(key, value)) def check_env_var(name:", "already exists: {}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied {} to {}'.format(src,", "# Where to get file templates from # (Clone file", "'html_body') class USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def", "SSH: section = 'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP", "settings from config.py'.format(displayed_title)) return config ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main')", "'workers', fallback=4) LOG_LEVEL = config.get(section, 'loglevel', fallback='warning') class API_SERVER: section", "['foo', 'bar'] Will return: ['foo', 'bar'] ''' import ast try:", "= mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER", "KEY_FILE = config.get(section, 'key_file', 
fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) -> Dict:", "not found in config directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH =", "import configparser config = configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if config.read(str(full_path)):", "ConfigLoader.load(path, displayed_title='hosts') result = {} for section in hosts_config.sections(): #", "check if tunneling is enabled if config.has_section(section) and config.getboolean(section, 'enabled',", "type: ignore '''Parses [proxy_tunneling] section''' config = ConfigLoader.load(path, displayed_title='proxy') section", "parse only sections which describe target hosts if section ==", "= str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH", "tunneling is enabled if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False): return", "env variable is declared''' if not os.getenv(name): msg = cleandoc(", "config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY =", "'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir /", "hosts_config = ConfigLoader.load(path, displayed_title='hosts') result = {} for section in", "tries to load these by default) config_dir = PosixPath.home() /", "Optional[Dict]: # type: ignore '''Parses [proxy_tunneling] section''' config = ConfigLoader.load(path,", "= str(tensorhive_package_dir / 'migrations') class ConfigInitilizer: '''Makes sure that all", "INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section =", "class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general' 
INTERVAL", "value for option from string to a valid python list.", "'~/.config/TensorHive/logs/' def full_path(path: str) -> str: # type: ignore return", "except Exception: log.error('[✘] Unable to recreate configuration files.') def safe_copy(self,", "exist_ok=True) # 2. Clone templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH,", "by default) config_dir = PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir", "is enabled if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False): return {", "= PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading {} config from {}'.format(displayed_title,", "ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path", "class DB: section = 'database' default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path:", "Any, List from inspect import cleandoc import shutil import tensorhive", "= 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite) class API: section", "log.info('Using default {} settings from config.py'.format(displayed_title)) return config ConfigInitilizer() config", "= ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling' # Check if section", "= section result[hostname] = { 'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname,", "sqlite:///test_database.sqlite) class API: section = 'api' TITLE = config.get(section, 'title',", "fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email',", "'''Makes sure that all default config files exist''' def __init__(self):", "= configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading {}", "function called with arguments: 
option='some_option', fallback=None [some_section] some_option = ['foo',", "which describe target hosts if section == 'proxy_tunneling': continue hostname", "return { 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section,", "'~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) -> str: # type: ignore return", "file already exists: {}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied {} to", "PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading {} config from {}'.format(displayed_title, full_path))", "PosixPath import configparser from typing import Dict, Optional, Any, List", "section = 'general' INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL =", "fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587) section = 'template/intruder' INTRUDER_SUBJECT", "full_path(path: str) -> str: # type: ignore return str(PosixPath(path).expanduser()) ENABLED", "fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS':", "fallback=22) } else: return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY =", "mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT =", "['foo', 'bar'] ''' import ast try: raw_arguments = config.get('auth', option)", "/ 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir", "ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing [auth] config section", "= str(PosixPath(__file__).parent / 
'controllers/responses.yml') with open(respones_file_path, 'r') as file: RESPONSES", "sure that env variable is declared''' if not os.getenv(name): msg", "'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED = config.getboolean(section,", "log = logging.getLogger(__name__) class CONFIG_FILES: # Where to copy files", "files permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH,", "'port', fallback=5000) WORKERS = config.getint(section, 'workers', fallback=4) LOG_LEVEL = config.get(section,", "file, function called with arguments: option='some_option', fallback=None [some_section] some_option =", "config.getint(section, 'proxy_port', fallback=22) } else: return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE)", "config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS =", "str) -> None: '''Safe means that it won't override existing", "= config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path = str(PosixPath(__file__).parent /", "'smtp_port', fallback=587) section = 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE", "'migrations') class ConfigInitilizer: '''Makes sure that all default config files", "MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general' INTERVAL =", "config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES =", "config files permission rw_owner_only = 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, 
rw_owner_only)", "from inspect import cleandoc import shutil import tensorhive import os", "'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section,", "fallback=None) section = 'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD", "stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. Clone templates safely", "str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH =", "'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section", "'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days', fallback=1)), 'JWT_TOKEN_LOCATION': config_get_parsed('jwt_token_location', fallback=['headers']) }", "fallback='TensorHive API') URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section,", "= uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' # Use in-memory", "'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22) } else: return", "(configparser.Error, ValueError): log.warning('Parsing [auth] config section failed for option \"{}\",", "NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None)", "displayed_title=''): import configparser config = configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if", "'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL = 
mailbot_config.get(section,", "'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite) class API: section =", "= 'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def full_path(path: str) -> str:", "{}'.format( option, fallback)) return fallback FLASK_JWT = { 'SECRET_KEY': config.get(section,", "for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. Clone templates", "python list. Fallback value is returned when anything goes wrong", "= PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not", "exists: {}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied {} to {}'.format(src, dst))", "HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True)", "already exist, skipping...') def recreate_default_configuration_files(self) -> None: try: # 1.", "= proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section = 'database' default_path = '~/.config/TensorHive/database.sqlite'", "'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' #", "'r') as file: RESPONSES = yaml.safe_load(file) class APP_SERVER: section =", "= yaml.safe_load(file) class APP_SERVER: section = 'web_app.server' BACKEND = config.get(section,", "else: log.warning('[✘] Configuration file not found ({})'.format(full_path)) log.info('Using default {}", "= {}'.format(key, value)) def check_env_var(name: str): '''Makes sure that env", "safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) 
self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH)", "'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def full_path(path: str) -> str: #", "config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes',", "'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section", "if all config files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\", "templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH,", "IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path = str(PosixPath(__file__).parent", "in hosts_config.sections(): # We want to parse only sections which", "fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port',", "def full_path(path: str) -> str: # type: ignore return str(PosixPath(path).expanduser())", "fallback=None [some_section] some_option = ['foo', 'bar'] Will return: ['foo', 'bar']", "= config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=1111) DEBUG", "if section is present and if yes, check if tunneling", "ignore return str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL =", "'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True) 
NOTIFY_VIA_EMAIL = config.getboolean(section,", "= 'proxy_tunneling' # Check if section is present and if", "dst: str) -> None: '''Safe means that it won't override", "fallback=1111) DEBUG = config.getboolean(section, 'debug', fallback=False) class MONITORING_SERVICE: section =", "# We want to parse only sections which describe target", "'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml') with", "config.getboolean(section, 'debug', fallback=False) class MONITORING_SERVICE: section = 'monitoring_service' ENABLED =", "section = 'web_app.server' BACKEND = config.get(section, 'backend', fallback='gunicorn') HOST =", "= 'monitoring_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section,", "try: raw_arguments = config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments", "it won't override existing configuration''' if PosixPath(dst).exists(): log.info('Skipping, file already", "= mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section", "from pathlib import PosixPath import configparser from typing import Dict,", "config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=1111) DEBUG =", "section = 'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD =", "log.info('[•] Reading {} config from {}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration", "yaml respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml') with open(respones_file_path, 'r') as", "open(respones_file_path, 'r') as file: RESPONSES = yaml.safe_load(file) class APP_SERVER: section", "fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None) section = 'smtp' SMTP_LOGIN", "these 
by default) config_dir = PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH =", "str, dst: str) -> None: '''Safe means that it won't", "'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES = config.getint(section,", "'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']),", "ENABLED = config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True)", "INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50)", "src: str, dst: str) -> None: '''Safe means that it", "str(tensorhive_package_dir / 'migrations') class ConfigInitilizer: '''Makes sure that all default", "configuration''' if PosixPath(dst).exists(): log.info('Skipping, file already exists: {}'.format(dst)) else: shutil.copy(src,", "FLASK_JWT = { 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled',", "recreating...') self.recreate_default_configuration_files() log.info('[•] All configs already exist, skipping...') def recreate_default_configuration_files(self)", "'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite)", "None: '''Safe means that it won't override existing configuration''' if", "returned when anything goes wrong (e.g. 
option or value not", "file: RESPONSES = yaml.safe_load(file) class APP_SERVER: section = 'web_app.server' BACKEND", "'''Parses sections containing hostnames''' hosts_config = ConfigLoader.load(path, displayed_title='hosts') result =", "'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section,", "'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir /", "fallback=5000) WORKERS = config.getint(section, 'workers', fallback=4) LOG_LEVEL = config.get(section, 'loglevel',", "'html_body') section = 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE =", "-> str: # type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section,", "config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days',", "str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations') class ConfigInitilizer:", "mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587) section =", "full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE:", "import logging log = logging.getLogger(__name__) class CONFIG_FILES: # Where to", "Where to copy files # (TensorHive tries to load these", "section = 'api' TITLE = config.get(section, 'title', fallback='TensorHive API') URL_HOSTNAME", "'port', fallback=1111) DEBUG = config.getboolean(section, 
'debug', fallback=False) class MONITORING_SERVICE: section", "config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml')", "SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers')", "''' Displays all uppercase class atributes (class must be defined", "config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL =", "display_config(cls): ''' Displays all uppercase class atributes (class must be", "config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22) }", "= config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=5000) WORKERS", "= mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section = 'template/admin'", "class API: section = 'api' TITLE = config.get(section, 'title', fallback='TensorHive", "is returned when anything goes wrong (e.g. 
option or value", "BACKEND = config.get(section, 'backend', fallback='gunicorn') HOST = config.get(section, 'host', fallback='0.0.0.0')", "fallback=False): return { 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port':", "mailbot_config.getint(section, 'smtp_port', fallback=587) section = 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject')", "'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from", "def check_env_var(name: str): '''Makes sure that env variable is declared'''", "config = ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling' # Check if", "'timeout', fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1) KEY_FILE = config.get(section,", "str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval',", "\"{}\", using fallback value: {}'.format( option, fallback)) return fallback FLASK_JWT", "skipping...') def recreate_default_configuration_files(self) -> None: try: # 1. 
Create directory", "`tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3.", "in config directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir /", "@staticmethod def load(path, displayed_title=''): import configparser config = configparser.ConfigParser(strict=False) full_path", "'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587) section = 'template/intruder'", "that it won't override existing configuration''' if PosixPath(dst).exists(): log.info('Skipping, file", "= mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER", "for option \"{}\", using fallback value: {}'.format( option, fallback)) return", "'proxy_port': config.getint(section, 'proxy_port', fallback=22) } else: return None AVAILABLE_NODES =", "config files exist''' def __init__(self): # 1. 
Check if all", "{env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH: section = 'ssh' HOSTS_CONFIG_FILE", "ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): ''' Displays all uppercase class atributes", "ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE:", "override existing configuration''' if PosixPath(dst).exists(): log.info('Skipping, file already exists: {}'.format(dst))", "section = 'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP =", "config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT:", "configs already exist, skipping...') def recreate_default_configuration_files(self) -> None: try: #", "1. Create directory for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) #", "displayed_title='mailbot') section = 'general' INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL", "self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. 
Change config files permission", "= 'database' default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) -> str:", "'web_app.server' BACKEND = config.get(section, 'backend', fallback='gunicorn') HOST = config.get(section, 'host',", "= config.get(section, 'loglevel', fallback='warning') class API_SERVER: section = 'api.server' BACKEND", "class CONFIG_FILES: # Where to copy files # (TensorHive tries", "str) -> str: # type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI =", "= 'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section,", "str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH =", "package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. 
Change", "recreate configuration files.') def safe_copy(self, src: str, dst: str) ->", "= mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT", "'mailbot_config.ini') # Where to get file templates from # (Clone", "'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22) } else:", "= config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error,", "'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path", "configuration files.') def safe_copy(self, src: str, dst: str) -> None:", "fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import yaml respones_file_path =", "API: section = 'api' TITLE = config.get(section, 'title', fallback='TensorHive API')", "it's not found in config directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH", "'protection_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval',", "/ 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir", "section in hosts_config.sections(): # We want to parse only sections", "section = 'protection_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL =", "TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1)", "fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=1111) DEBUG = config.getboolean(section, 'debug',", "= config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE: section = 
'protection_service' ENABLED", "'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section,", "rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘] Unable to", "dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. Change config files", "'password', fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section,", "'number_of_retries', fallback=1) KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str)", "URL_PREFIX = config.get(section, 'url_prefix', fallback='api') SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml')", "log.warning(msg[0]) log.warning(msg[1]) class SSH: section = 'ssh' HOSTS_CONFIG_FILE = config.get(section,", "config_dir = PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini')", "List[str]: # type: ignore ''' Parses value for option from", "copy files # (TensorHive tries to load these by default)", "exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists()", "class MONITORING_SERVICE: section = 'monitoring_service' ENABLED = config.getboolean(section, 'enabled', fallback=True)", "rw_owner_only) except Exception: log.error('[✘] Unable to recreate configuration files.') def", "from config.py'.format(displayed_title)) return config 
ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def", "option \"{}\", using fallback value: {}'.format( option, fallback)) return fallback", "'''Parses [proxy_tunneling] section''' config = ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling'", "mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general' INTERVAL = mailbot_config.getfloat(section,", "config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT =", "msg = cleandoc( ''' {env} - undeclared environment variable! Try", "hosts_config_to_dict(path: str) -> Dict: # type: ignore '''Parses sections containing", "respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml') with open(respones_file_path, 'r') as file:", "means that it won't override existing configuration''' if PosixPath(dst).exists(): log.info('Skipping,", "for key, value in cls.__dict__.items(): if key.isupper(): print('{} = {}'.format(key,", "'notify_via_email', fallback=False) class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section =", "= logging.getLogger(__name__) class CONFIG_FILES: # Where to copy files #", "section failed for option \"{}\", using fallback value: {}'.format( option,", "BACKEND = config.get(section, 'backend', fallback='gevent') HOST = config.get(section, 'host', fallback='0.0.0.0')", "config.get(section, 'loglevel', fallback='warning') class API_SERVER: section = 'api.server' BACKEND =", "import configparser from typing import Dict, Optional, Any, List from", "files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\", "type: ignore '''Parses sections containing hostnames''' hosts_config = ConfigLoader.load(path, 
displayed_title='hosts')", "a valid python list. Fallback value is returned when anything", "section is present and if yes, check if tunneling is", "fallback='warning') class API_SERVER: section = 'api.server' BACKEND = config.get(section, 'backend',", "{} config from {}'.format(displayed_title, full_path)) else: log.warning('[✘] Configuration file not", "\\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•] Detected", "= config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix', fallback='api') SPEC_FILE", "= 0o600 os.chmod(CONFIG_FILES.MAIN_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception:", "that all default config files exist''' def __init__(self): # 1.", "parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing [auth] config section failed for", "Optional, Any, List from inspect import cleandoc import shutil import", "config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access',", "in-memory (before: sqlite:///test_database.sqlite) class API: section = 'api' TITLE =", "MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini')", "= str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH", "(e.g. 
option or value not present) Example .ini file, function", "'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir /", "tensorhive import os import logging log = logging.getLogger(__name__) class CONFIG_FILES:", "'JWT_BLACKLIST_ENABLED': config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section,", "= 'general' INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section,", "fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins',", "failed for option \"{}\", using fallback value: {}'.format( option, fallback))", "import os import logging log = logging.getLogger(__name__) class CONFIG_FILES: #", "mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section =", "to copy files # (TensorHive tries to load these by", "exist''' def __init__(self): # 1. Check if all config files", "-> List[str]: # type: ignore ''' Parses value for option", "Where to get file templates from # (Clone file when", "mailbot_config.get(section, 'admin_email', fallback=None) section = 'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email',", "log.warning('[•] Detected missing default config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All", "'api.server' BACKEND = config.get(section, 'backend', fallback='gevent') HOST = config.get(section, 'host',", "= mailbot_config.get(section, 'html_body') section = 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section, 'subject')", "def __init__(self): # 1. 
Check if all config files exist", "- undeclared environment variable! Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0])", "self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. Change config", "and if yes, check if tunneling is enabled if config.has_section(section)", "= ['foo', 'bar'] Will return: ['foo', 'bar'] ''' import ast", "Check if section is present and if yes, check if", "config.read(str(full_path)): log.info('[•] Reading {} config from {}'.format(displayed_title, full_path)) else: log.warning('[✘]", "file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All configs already exist, skipping...') def", "log.info('Copied {} to {}'.format(src, dst)) class ConfigLoader: @staticmethod def load(path,", "Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in cls.__dict__.items():", "fallback='gunicorn') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port',", "else: shutil.copy(src, dst) log.info('Copied {} to {}'.format(src, dst)) class ConfigLoader:", "configparser from typing import Dict, Optional, Any, List from inspect", "usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in cls.__dict__.items(): if", "'user': hosts_config.get(hostname, 'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22) } return result", "logging log = logging.getLogger(__name__) class CONFIG_FILES: # Where to copy", "TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite) class API:", "LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION = 
config.getint(section, 'log_cleanup_action', fallback=2)", "section = 'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL =", "hostnames''' hosts_config = ConfigLoader.load(path, displayed_title='hosts') result = {} for section", "class USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def full_path(path:", "NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1) KEY_FILE = config.get(section, 'key_file', fallback='~/.config/TensorHive/ssh_key')", "None: try: # 1. Create directory for stroing config files", "DB: section = 'database' default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str)", "hosts_config.getint(hostname, 'port', fallback=22) } return result def proxy_config_to_dict(path: str) ->", "fallback=587) section = 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE =", "-> Optional[Dict]: # type: ignore '''Parses [proxy_tunneling] section''' config =", "'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section,", "{env} - undeclared environment variable! 
Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n')", "Try this: `export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH: section", "= config.get(section, 'backend', fallback='gunicorn') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT", "fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days', fallback=1)), 'JWT_TOKEN_LOCATION':", "return config ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): '''", "{} for section in hosts_config.sections(): # We want to parse", "= config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section, 'impl_location', fallback='tensorhive.api.controllers') import", "that env variable is declared''' if not os.getenv(name): msg =", "str: # type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path',", "'title', fallback='TensorHive API') URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX =", "'key_file', fallback='~/.config/TensorHive/ssh_key') def hosts_config_to_dict(path: str) -> Dict: # type: ignore", "not found ({})'.format(full_path)) log.info('Using default {} settings from config.py'.format(displayed_title)) return", "describe target hosts if section == 'proxy_tunneling': continue hostname =", "config.getint(section, 'port', fallback=5000) WORKERS = config.getint(section, 'workers', fallback=4) LOG_LEVEL =", "'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES': timedelta(days=config.getint(section, 'jwt_refresh_token_expires_days', 
fallback=1)),", "# (TensorHive tries to load these by default) config_dir =", "return result def proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore", "config.getboolean(section, 'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors',", "logging.getLogger(__name__) class CONFIG_FILES: # Where to copy files # (TensorHive", "= ConfigLoader.load(path, displayed_title='hosts') result = {} for section in hosts_config.sections():", "config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): ''' Displays all uppercase", "section''' config = ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling' # Check", "# type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path))", "fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER = mailbot_config.get(section, 'smtp_server',", "ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations')", "fallback)) return fallback FLASK_JWT = { 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'),", "valid python list. Fallback value is returned when anything goes", "'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22)", "dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) # 3. Change config files permission rw_owner_only", "fallback value: {}'.format( option, fallback)) return fallback FLASK_JWT = {", "__init__(self): # 1. 
Check if all config files exist all_exist", "'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix', fallback='api') SPEC_FILE = config.get(section,", "mailbot_config.get(section, 'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section = 'template/admin' ADMIN_SUBJECT", "Displays all uppercase class atributes (class must be defined first)", "type: ignore return str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL", "= 'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section,", "return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB:", "displayed_title='hosts') result = {} for section in hosts_config.sections(): # We", "MONITORING_SERVICE: section = 'monitoring_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR", "HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini')", "= str(config_dir / 'mailbot_config.ini') # Where to get file templates", "config ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): ''' Displays", "from datetime import timedelta section = 'auth' def config_get_parsed(option: str,", "'api' TITLE = config.get(section, 'title', fallback='TensorHive API') URL_HOSTNAME = config.get(section,", "'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup', fallback=True) TIMEOUT = config.getfloat(section,", "'backend', fallback='gunicorn') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section,", "raw_arguments = config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments except", "def 
recreate_default_configuration_files(self) -> None: try: # 1. Create directory for", "'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE: section", "config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE: section = 'protection_service' ENABLED =", "mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None) SMTP_SERVER =", "from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.MAILBOT_TEMPLATE_CONFIG_PATH, dst=CONFIG_FILES.MAILBOT_CONFIG_PATH) #", "RESPONSES = yaml.safe_load(file) class APP_SERVER: section = 'web_app.server' BACKEND =", "'admin_email', fallback=None) section = 'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None)", "cleandoc( ''' {env} - undeclared environment variable! 
Try this: `export", "os import logging log = logging.getLogger(__name__) class CONFIG_FILES: # Where", "hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE) class DB: section = 'database' default_path", "= config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) class", "= config.get(section, 'url_prefix', fallback='api') SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION", "value: {}'.format( option, fallback)) return fallback FLASK_JWT = { 'SECRET_KEY':", "'enabled', fallback=False): return { 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'),", "CONFIG_FILES: # Where to copy files # (TensorHive tries to", "all_exist: log.warning('[•] Detected missing default config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•]", "or value not present) Example .ini file, function called with", "WORKERS = config.getint(section, 'workers', fallback=4) LOG_LEVEL = config.get(section, 'loglevel', fallback='warning')", "Example .ini file, function called with arguments: option='some_option', fallback=None [some_section]", "try: # 1. Create directory for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True,", "= str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations') class", "# type: ignore return str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled', fallback=True)", "\\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•] Detected missing default config", "/ 'mailbot_config.ini') # Where to get file templates from #", "default config files exist''' def __init__(self): # 1. 
Check if", "config.get(section, 'title', fallback='TensorHive API') URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX", "= config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED", "missing default config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All configs already", "Use in-memory (before: sqlite:///test_database.sqlite) class API: section = 'api' TITLE", "LOG_LEVEL = config.get(section, 'loglevel', fallback='warning') class API_SERVER: section = 'api.server'", "'user'), 'port': hosts_config.getint(hostname, 'port', fallback=22) } return result def proxy_config_to_dict(path:", "as file: RESPONSES = yaml.safe_load(file) class APP_SERVER: section = 'web_app.server'", "must be defined first) Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for", "} else: return None AVAILABLE_NODES = hosts_config_to_dict(HOSTS_CONFIG_FILE) PROXY = proxy_config_to_dict(HOSTS_CONFIG_FILE)", "containing hostnames''' hosts_config = ConfigLoader.load(path, displayed_title='hosts') result = {} for", "'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section = 'usage_logging_service'", "# type: ignore '''Parses sections containing hostnames''' hosts_config = ConfigLoader.load(path,", "fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' # Use in-memory (before: sqlite:///test_database.sqlite) class", "for option from string to a valid python list. 
Fallback", "'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22) } else: return None AVAILABLE_NODES", "= mailbot_config.get(section, 'html_body') class USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path =", "class ConfigLoader: @staticmethod def load(path, displayed_title=''): import configparser config =", "NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False)", "config files exist all_exist = PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and", "section = 'database' default_path = '~/.config/TensorHive/database.sqlite' def uri_for_path(path: str) ->", "PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist: log.warning('[•] Detected missing", "fallback='tensorhive.api.controllers') import yaml respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml') with open(respones_file_path,", "first) Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in", "all uppercase class atributes (class must be defined first) Example", "config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=5000) WORKERS =", "STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30)", "dst) log.info('Copied {} to {}'.format(src, dst)) class ConfigLoader: @staticmethod def", "display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in cls.__dict__.items(): if key.isupper():", "is present and if yes, check if tunneling is enabled", "ConfigInitilizer: '''Makes sure that all default 
config files exist''' def", "`export {env}=\"...\"` ''').format(env=name).split('\\n') log.warning(msg[0]) log.warning(msg[1]) class SSH: section = 'ssh'", "import timedelta section = 'auth' def config_get_parsed(option: str, fallback: Any)", "configparser config = configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•]", "config.getint(section, 'port', fallback=1111) DEBUG = config.getboolean(section, 'debug', fallback=False) class MONITORING_SERVICE:", "return str(PosixPath(path).expanduser()) ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section,", "found ({})'.format(full_path)) log.info('Using default {} settings from config.py'.format(displayed_title)) return config", "if yes, check if tunneling is enabled if config.has_section(section) and", "config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1) KEY_FILE =", "= config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries', fallback=1) KEY_FILE", "config file(s), recreating...') self.recreate_default_configuration_files() log.info('[•] All configs already exist, skipping...')", "ignore '''Parses [proxy_tunneling] section''' config = ConfigLoader.load(path, displayed_title='proxy') section =", "fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE: section =", "SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587) section = 'template/intruder' INTRUDER_SUBJECT =", "import cleandoc import shutil import tensorhive import os import logging", "= 'auth' def config_get_parsed(option: str, fallback: Any) -> List[str]: #", "[some_section] some_option = ['foo', 'bar'] Will return: ['foo', 'bar'] '''", "file not found ({})'.format(full_path)) log.info('Using default {} settings from 
config.py'.format(displayed_title))", "ConfigInitilizer() config = ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): ''' Displays all", "target hosts if section == 'proxy_tunneling': continue hostname = section", "class AUTH: from datetime import timedelta section = 'auth' def", "sections containing hostnames''' hosts_config = ConfigLoader.load(path, displayed_title='hosts') result = {}", "timedelta section = 'auth' def config_get_parsed(option: str, fallback: Any) ->", "'loglevel', fallback='warning') class API_SERVER: section = 'api.server' BACKEND = config.get(section,", "MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini')", "configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading {} config", "section = 'api.server' BACKEND = config.get(section, 'backend', fallback='gevent') HOST =", "be defined first) Example usage: display_config(API_SERVER) ''' print('[{class_name}]'.format(class_name=cls.__name__)) for key,", "fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) NOTIFY_ON_PTY = config.getboolean(section, 'notify_on_pty',", "fallback=True) TIMEOUT = config.getfloat(section, 'timeout', fallback=10.0) NUM_RETRIES = config.getint(section, 'number_of_retries',", "CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True) # 2. 
Clone templates safely from `tensorhive` package", "return: ['foo', 'bar'] ''' import ast try: raw_arguments = config.get('auth',", "config = configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading", "{ 'proxy_host': config.get(section, 'proxy_host'), 'proxy_user': config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port',", "fallback=4) LOG_LEVEL = config.get(section, 'loglevel', fallback='warning') class API_SERVER: section =", "'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations') class ConfigInitilizer: '''Makes sure", "option='some_option', fallback=None [some_section] some_option = ['foo', 'bar'] Will return: ['foo',", "displayed_title='main') def display_config(cls): ''' Displays all uppercase class atributes (class", "pathlib import PosixPath import configparser from typing import Dict, Optional,", "str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir / 'hosts_config.ini') MAILBOT_CONFIG_PATH =", "2. 
Clone templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH,", "SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password', fallback=None)", "/ '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH = str(config_dir", "MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations') class ConfigInitilizer: '''Makes sure that", "full_path = PosixPath(path).expanduser() if config.read(str(full_path)): log.info('[•] Reading {} config from", "option or value not present) Example .ini file, function called", "option, fallback)) return fallback FLASK_JWT = { 'SECRET_KEY': config.get(section, 'secrect_key',", "= PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH", "fallback=False) class MONITORING_SERVICE: section = 'monitoring_service' ENABLED = config.getboolean(section, 'enabled',", "load(path, displayed_title=''): import configparser config = configparser.ConfigParser(strict=False) full_path = PosixPath(path).expanduser()", "ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH, displayed_title='mailbot') section = 'general' INTERVAL = mailbot_config.getfloat(section, 'interval', fallback=10.0)", "DEBUG = config.getboolean(section, 'debug', fallback=False) class MONITORING_SERVICE: section = 'monitoring_service'", "= config.get(section, 'title', fallback='TensorHive API') URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0')", "'subject') INTRUDER_BODY_TEMPLATE = mailbot_config.get(section, 'html_body') section = 'template/admin' ADMIN_SUBJECT =", "datetime import timedelta section = 'auth' def config_get_parsed(option: str, fallback:", "''' Parses value for option from string to a valid", "config.getint(section, 'workers', fallback=4) LOG_LEVEL = 
config.get(section, 'loglevel', fallback='warning') class API_SERVER:", "TITLE = config.get(section, 'title', fallback='TensorHive API') URL_HOSTNAME = config.get(section, 'url_hostname',", "= mailbot_config.getint(section, 'smtp_port', fallback=587) section = 'template/intruder' INTRUDER_SUBJECT = mailbot_config.get(section,", "'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section,", "'update_interval', fallback=2.0) class PROTECTION_SERVICE: section = 'protection_service' ENABLED = config.getboolean(section,", "= config.getint(section, 'port', fallback=1111) DEBUG = config.getboolean(section, 'debug', fallback=False) class", "to recreate configuration files.') def safe_copy(self, src: str, dst: str)", "log.error('[✘] Unable to recreate configuration files.') def safe_copy(self, src: str,", "mailbot_config.get(section, 'html_body') section = 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE", "config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR =", "USAGE_LOGGING_SERVICE: section = 'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def full_path(path: str)", "self.recreate_default_configuration_files() log.info('[•] All configs already exist, skipping...') def recreate_default_configuration_files(self) ->", "class ConfigInitilizer: '''Makes sure that all default config files exist'''", "class PROTECTION_SERVICE: section = 'protection_service' ENABLED = config.getboolean(section, 'enabled', fallback=True)", "''' {env} - undeclared environment variable! 
Try this: `export {env}=\"...\"`", "config.get(section, 'proxy_user'), 'proxy_port': config.getint(section, 'proxy_port', fallback=22) } else: return None", "section = 'monitoring_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR =", "is declared''' if not os.getenv(name): msg = cleandoc( ''' {env}", "= 'template/admin' ADMIN_SUBJECT = mailbot_config.get(section, 'subject') ADMIN_BODY_TEMPLATE = mailbot_config.get(section, 'html_body')", "'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval',", "AUTH: from datetime import timedelta section = 'auth' def config_get_parsed(option:", "Configuration file not found ({})'.format(full_path)) log.info('Using default {} settings from", "We want to parse only sections which describe target hosts", "except (configparser.Error, ValueError): log.warning('Parsing [auth] config section failed for option", "found in config directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir", "= ConfigLoader.load(CONFIG_FILES.MAIN_CONFIG_PATH, displayed_title='main') def display_config(cls): ''' Displays all uppercase class", "check_env_var(name: str): '''Makes sure that env variable is declared''' if", "from string to a valid python list. 
Fallback value is", "ast try: raw_arguments = config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments) return", "= str(tensorhive_package_dir / 'mailbot_config.ini') ALEMBIC_CONFIG_PATH = str(tensorhive_package_dir / 'alembic.ini') MIGRATIONS_CONFIG_PATH", "hostname = section result[hostname] = { 'user': hosts_config.get(hostname, 'user'), 'port':", "return parsed_arguments except (configparser.Error, ValueError): log.warning('Parsing [auth] config section failed", "} return result def proxy_config_to_dict(path: str) -> Optional[Dict]: # type:", "str(PosixPath(__file__).parent / 'controllers/responses.yml') with open(respones_file_path, 'r') as file: RESPONSES =", "'monitoring_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) ENABLE_GPU_MONITOR = config.getboolean(section, 'enable_gpu_monitor',", "Clone templates safely from `tensorhive` package self.safe_copy(src=CONFIG_FILES.MAIN_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.MAIN_CONFIG_PATH) self.safe_copy(src=CONFIG_FILES.HOSTS_CONFIG_TEMPLATE_PATH, dst=CONFIG_FILES.HOSTS_CONFIG_PATH)", "'url_prefix', fallback='api') SPEC_FILE = config.get(section, 'spec_file', fallback='api_specification.yml') IMPL_LOCATION = config.get(section,", "value is returned when anything goes wrong (e.g. 
option or", "with open(respones_file_path, 'r') as file: RESPONSES = yaml.safe_load(file) class APP_SERVER:", "(Clone file when it's not found in config directory) tensorhive_package_dir", "fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder',", "= mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN = mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL", "'backend', fallback='gevent') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section,", "log.warning(msg[1]) class SSH: section = 'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file',", "mailbot_config.getfloat(section, 'interval', fallback=10.0) MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER =", "result = {} for section in hosts_config.sections(): # We want", "config.getboolean(section, 'enable_gpu_monitor', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE:", "str) -> str: # type: ignore return str(PosixPath(path).expanduser()) ENABLED =", "tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH =", "= {} for section in hosts_config.sections(): # We want to", "Dict, Optional, Any, List from inspect import cleandoc import shutil", "'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section,", "value)) def check_env_var(name: str): '''Makes sure that env variable is", "/ 'migrations') class ConfigInitilizer: '''Makes sure that 
all default config", "str): '''Makes sure that env variable is declared''' if not", "hosts if section == 'proxy_tunneling': continue hostname = section result[hostname]", "'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True), 'JWT_ACCESS_TOKEN_EXPIRES': timedelta(minutes=config.getint(section, 'jwt_access_token_expires_minutes', fallback=1)), 'JWT_REFRESH_TOKEN_EXPIRES':", "section = 'auth' def config_get_parsed(option: str, fallback: Any) -> List[str]:", "log.info('Skipping, file already exists: {}'.format(dst)) else: shutil.copy(src, dst) log.info('Copied {}", "HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'hosts_config.ini') MAILBOT_TEMPLATE_CONFIG_PATH = str(tensorhive_package_dir / 'mailbot_config.ini')", "not os.getenv(name): msg = cleandoc( ''' {env} - undeclared environment", "anything goes wrong (e.g. option or value not present) Example", "log.warning('Parsing [auth] config section failed for option \"{}\", using fallback", "Will return: ['foo', 'bar'] ''' import ast try: raw_arguments =", "if PosixPath(dst).exists(): log.info('Skipping, file already exists: {}'.format(dst)) else: shutil.copy(src, dst)", "displayed_title='proxy') section = 'proxy_tunneling' # Check if section is present", "= config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section, 'log_dir', fallback=default_path)) LOG_CLEANUP_ACTION", "JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED = config.getboolean(section, 'enabled', fallback=True) UPDATE_INTERVAL", "PosixPath.home() / '.config/TensorHive' MAIN_CONFIG_PATH = str(config_dir / 'main_config.ini') HOSTS_CONFIG_PATH =", "fallback=22) } return result def proxy_config_to_dict(path: str) -> Optional[Dict]: #", "called with arguments: option='some_option', fallback=None [some_section] some_option = ['foo', 'bar']", "all default config files exist''' def __init__(self): # 1. 
Check", "print('{} = {}'.format(key, value)) def check_env_var(name: str): '''Makes sure that", "UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=30.0) STOP_TERMINATION_ATTEMPTS_AFTER = config.getfloat(section, 'stop_termination_attempts_after_mins', fallback=5.0)", "UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) class PROTECTION_SERVICE: section = 'protection_service'", "full_path)) else: log.warning('[✘] Configuration file not found ({})'.format(full_path)) log.info('Using default", "fallback FLASK_JWT = { 'SECRET_KEY': config.get(section, 'secrect_key', fallback='jwt-some-secret'), 'JWT_BLACKLIST_ENABLED': config.getboolean(section,", "section = 'usage_logging_service' default_path = '~/.config/TensorHive/logs/' def full_path(path: str) ->", "print('[{class_name}]'.format(class_name=cls.__name__)) for key, value in cls.__dict__.items(): if key.isupper(): print('{} =", "proxy_config_to_dict(path: str) -> Optional[Dict]: # type: ignore '''Parses [proxy_tunneling] section'''", "config.get('auth', option) parsed_arguments = ast.literal_eval(raw_arguments) return parsed_arguments except (configparser.Error, ValueError):", "mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True) NOTIFY_ADMIN =", "PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini') HOSTS_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir /", "cleandoc import shutil import tensorhive import os import logging log", "option from string to a valid python list. Fallback value", "'port': hosts_config.getint(hostname, 'port', fallback=22) } return result def proxy_config_to_dict(path: str)", "[proxy_tunneling] section''' config = ConfigLoader.load(path, displayed_title='proxy') section = 'proxy_tunneling' #", "goes wrong (e.g. 
option or value not present) Example .ini", "API') URL_HOSTNAME = config.get(section, 'url_hostname', fallback='0.0.0.0') URL_PREFIX = config.get(section, 'url_prefix',", "variable is declared''' if not os.getenv(name): msg = cleandoc( '''", "def hosts_config_to_dict(path: str) -> Dict: # type: ignore '''Parses sections", "# type: ignore ''' Parses value for option from string", "file when it's not found in config directory) tensorhive_package_dir =", "present and if yes, check if tunneling is enabled if", "file templates from # (Clone file when it's not found", "mailbot_config.getboolean(section, 'notify_admin', fallback=False) ADMIN_EMAIL = mailbot_config.get(section, 'admin_email', fallback=None) section =", "'host', fallback='0.0.0.0') PORT = config.getint(section, 'port', fallback=5000) WORKERS = config.getint(section,", "Any) -> List[str]: # type: ignore ''' Parses value for", "exist, skipping...') def recreate_default_configuration_files(self) -> None: try: # 1. Create", "# 1. Create directory for stroing config files CONFIG_FILES.config_dir.mkdir(parents=True, exist_ok=True)", "type: ignore ''' Parses value for option from string to", "= '~/.config/TensorHive/logs/' def full_path(path: str) -> str: # type: ignore", "fallback=default_path)) LOG_CLEANUP_ACTION = config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section =", "''' import ast try: raw_arguments = config.get('auth', option) parsed_arguments =", "class atributes (class must be defined first) Example usage: display_config(API_SERVER)", "'jwt_blacklist_enabled', fallback=True), 'JWT_BLACKLIST_TOKEN_CHECKS': config_get_parsed('jwt_blacklist_token_checks', fallback=['access', 'refresh']), 'BUNDLE_ERRORS': config.getboolean(section, 'bundle_errors', fallback=True),", "'smtp' SMTP_LOGIN = mailbot_config.get(section, 'email', fallback=None) SMTP_PASSWORD = mailbot_config.get(section, 'password',", "HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = 
config.getint(section, 'port', fallback=5000)", "fallback: Any) -> List[str]: # type: ignore ''' Parses value", "fallback=5.0) SCHEDULE_QUEUED_JOBS_WHEN_FREE_MINS = config.getint(section, \"schedule_queued_jobs_when_free_mins\", fallback=30) class AUTH: from datetime", "'enabled', fallback=True) UPDATE_INTERVAL = config.getfloat(section, 'update_interval', fallback=2.0) LOG_DIR = full_path(config.get(section,", "MAILBOT_CONFIG_PATH = str(config_dir / 'mailbot_config.ini') # Where to get file", "def safe_copy(self, src: str, dst: str) -> None: '''Safe means", "fallback='gevent') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT = config.getint(section, 'port',", "MAX_EMAILS_PER_PROTECTION_INTERVAL = mailbot_config.getint(section, 'max_emails_per_protection_interval', fallback=50) NOTIFY_INTRUDER = mailbot_config.getboolean(section, 'notify_intruder', fallback=True)", "config.get(section, 'backend', fallback='gunicorn') HOST = config.get(section, 'host', fallback='0.0.0.0') PORT =", "SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI = 'sqlite://' # Use", "'ssh' HOSTS_CONFIG_FILE = config.get(section, 'hosts_config_file', fallback=CONFIG_FILES.HOSTS_CONFIG_PATH) TEST_ON_STARTUP = config.getboolean(section, 'test_on_startup',", "config directory) tensorhive_package_dir = PosixPath(__file__).parent MAIN_CONFIG_TEMPLATE_PATH = str(tensorhive_package_dir / 'main_config.ini')", "if config.has_section(section) and config.getboolean(section, 'enabled', fallback=False): return { 'proxy_host': config.get(section,", "default_path = '~/.config/TensorHive/logs/' def full_path(path: str) -> str: # type:", "using fallback value: {}'.format( option, fallback)) return fallback FLASK_JWT =", "present) Example .ini file, function called with arguments: option='some_option', fallback=None", "not all_exist: log.warning('[•] Detected missing default config file(s), recreating...') 
self.recreate_default_configuration_files()", "fallback=30) class AUTH: from datetime import timedelta section = 'auth'", "{} to {}'.format(src, dst)) class ConfigLoader: @staticmethod def load(path, displayed_title=''):", "import yaml respones_file_path = str(PosixPath(__file__).parent / 'controllers/responses.yml') with open(respones_file_path, 'r')", "# (Clone file when it's not found in config directory)", "not present) Example .ini file, function called with arguments: option='some_option',", "/ 'alembic.ini') MIGRATIONS_CONFIG_PATH = str(tensorhive_package_dir / 'migrations') class ConfigInitilizer: '''Makes", "NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT: mailbot_config = ConfigLoader.load(CONFIG_FILES.MAILBOT_CONFIG_PATH,", "Parses value for option from string to a valid python", "config_get_parsed(option: str, fallback: Any) -> List[str]: # type: ignore '''", "# type: ignore '''Parses [proxy_tunneling] section''' config = ConfigLoader.load(path, displayed_title='proxy')", "from typing import Dict, Optional, Any, List from inspect import", "os.chmod(CONFIG_FILES.HOSTS_CONFIG_PATH, rw_owner_only) os.chmod(CONFIG_FILES.MAILBOT_CONFIG_PATH, rw_owner_only) except Exception: log.error('[✘] Unable to recreate", "'notify_on_pty', fallback=True) NOTIFY_VIA_EMAIL = config.getboolean(section, 'notify_via_email', fallback=False) class MAILBOT: mailbot_config", "arguments: option='some_option', fallback=None [some_section] some_option = ['foo', 'bar'] Will return:", "only sections which describe target hosts if section == 'proxy_tunneling':", "'''Safe means that it won't override existing configuration''' if PosixPath(dst).exists():", "config.getint(section, 'log_cleanup_action', fallback=2) class JOB_SCHEDULING_SERVICE: section = 'job_scheduling_service' ENABLED =", "PosixPath(CONFIG_FILES.MAIN_CONFIG_PATH).exists() and \\ PosixPath(CONFIG_FILES.HOSTS_CONFIG_PATH).exists() and \\ 
PosixPath(CONFIG_FILES.MAILBOT_CONFIG_PATH).exists() if not all_exist:", "SMTP_SERVER = mailbot_config.get(section, 'smtp_server', fallback=None) SMTP_PORT = mailbot_config.getint(section, 'smtp_port', fallback=587)", "type: ignore return 'sqlite:///{}'.format(PosixPath(path).expanduser()) SQLALCHEMY_DATABASE_URI = uri_for_path(config.get(section, 'path', fallback=default_path)) TEST_DATABASE_URI" ]
[ "self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim) self.out =", "return weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0)", "Tensor class Encoder(nn.Module): def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout):", "emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim) self.out", "src.shape[1] max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len,", "self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim", "dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional", "torch.optim as optim import torch.nn.functional as F from torch import", "nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc = nn.Linear(enc_hid_dim * 2,", "dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self, src): embedded = self.dropout(self.embedding(src))", "torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return outputs, hidden class Attention(nn.Module):", "input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep", "from torch import Tensor class Encoder(nn.Module): def __init__(self, input_dim, emb_dim,", "Tuple import torch import torch.nn as nn import torch.optim as", "= encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep =", "= weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1))", "self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional =", "weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded", "a = self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs = 
encoder_outputs.permute(1,", "decoder is the <sos> token output = trg[0,:] for t", "enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout = dropout self.embedding = nn.Embedding(input_dim,", "in range(1, max_len): output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t]", "2) + emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim)", "= nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True)", "src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2) energy = torch.tanh(self.attn(torch.cat((", "self.dropout = dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim,", "= decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2) energy", "= embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output =", "Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim", "self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep =", "weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim =", "= a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a,", "output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a =", "hidden, encoder_outputs) outputs[t] = output teacher_force = random.random() < teacher_forcing_ratio", "max_len): output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] = output", "decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output = output.squeeze(0)", "= output_dim self.dropout = dropout self.attention = attention self.embedding =", "a.unsqueeze(1) 
encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs)", "encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs =", "0, 2) return weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs): input", "= self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep", "Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__()", "embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output,", "super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim", "= torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return outputs, hidden class", "self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim *", "self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout = dropout self.embedding", "True) self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout = nn.Dropout(dropout)", "nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden,", "weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs):", "dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len", "= torch.cat((embedded, weighted_encoder_rep), dim = 2) output, decoder_hidden = self.rnn(rnn_input,", "= emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim =", "nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim)", "input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim 
= dec_hid_dim", "self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.dropout", "optim import torch.nn.functional as F from torch import Tensor class", "dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in", "output = trg[0,:] for t in range(1, max_len): output, hidden", "output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1)) return output,", "= enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout = dropout self.embedding =", "should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1)))", "hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return outputs, hidden", "encoder self.decoder = decoder self.device = device def forward(self, src,", "as F from torch import Tensor class Encoder(nn.Module): def __init__(self,", "device): super().__init__() self.encoder = encoder self.decoder = decoder self.device =", "input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input =", "= nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2) + emb_dim,", "= torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep", "= self.rnn(embedded) # output of bi-directional rnn should be concatenated", "= dropout self.attention = attention self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn", "decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs", "import random from typing import Tuple import torch import torch.nn", "dim = 1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self,", "attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim =", "self.rnn = 
nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc = nn.Linear(enc_hid_dim", "enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.dropout = dropout", "nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self, src):", "be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return", "embedded = embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output", "max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len, batch_size,", "= decoder self.device = device def forward(self, src, trg, teacher_forcing_ratio=0.5):", "< teacher_forcing_ratio top1 = output.max(1)[1] output = (trg[t] if teacher_force", "= self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2)", "teacher_force = random.random() < teacher_forcing_ratio top1 = output.max(1)[1] output =", "= dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim,", "torch.nn.functional as F from torch import Tensor class Encoder(nn.Module): def", "forward(self, src): embedded = self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) #", "enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim", "import Tensor class Encoder(nn.Module): def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim,", "attention self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2)", "encoder, decoder, device): super().__init__() self.encoder = encoder self.decoder = decoder", "self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden,", "nn.Dropout(dropout) def forward(self, src): embedded = self.dropout(self.embedding(src)) outputs, hidden =", "class 
Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super().__init__() self.encoder =", "self.dec_hid_dim = dec_hid_dim self.dropout = dropout self.embedding = nn.Embedding(input_dim, emb_dim)", "torch import torch.nn as nn import torch.optim as optim import", "return outputs, hidden class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim):", "self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout", "__init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim =", "= 1))) return outputs, hidden class Attention(nn.Module): def __init__(self, enc_hid_dim,", "dec_hid_dim self.output_dim = output_dim self.dropout = dropout self.attention = attention", "decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0)", "= input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim =", "def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a =", "enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim =", "= emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout =", "weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1)) return", "def forward(self, input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded =", "the decoder is the <sos> token output = trg[0,:] for", "repeated_decoder_hidden, encoder_outputs), dim = 2))) attention = torch.sum(energy, dim=2) return", "random.random() < teacher_forcing_ratio top1 = output.max(1)[1] output = (trg[t] if", "self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src)", "first input to the decoder is 
the <sos> token output", "output.max(1)[1] output = (trg[t] if teacher_force else top1) return outputs", "dec_hid_dim self.attn_in = (enc_hid_dim * 2) + dec_hid_dim self.attn =", "random from typing import Tuple import torch import torch.nn as", "concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim = 1))) return outputs,", "encoder_outputs, hidden = self.encoder(src) # first input to the decoder", "outputs, hidden class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__()", "output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super().__init__()", "self.decoder(output, hidden, encoder_outputs) outputs[t] = output teacher_force = random.random() <", "= encoder self.decoder = decoder self.device = device def forward(self,", "emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim = input_dim self.emb_dim =", "trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden", "<sos> token output = trg[0,:] for t in range(1, max_len):", "import torch.nn as nn import torch.optim as optim import torch.nn.functional", "_weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1)", "teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim", "Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super().__init__() self.encoder = encoder", "= dec_hid_dim self.output_dim = output_dim self.dropout = dropout self.attention =", "= self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) # output of bi-directional", "nn import torch.optim as optim import torch.nn.functional as F from", "__init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim = 
input_dim", "top1 = output.max(1)[1] output = (trg[t] if teacher_force else top1)", "= input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input", "= enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim * 2)", "+ dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs):", "dropout): super().__init__() self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim =", "def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim", "src, trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len = trg.shape[0] trg_vocab_size", "= src.shape[1] max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs =", "enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim = input_dim self.emb_dim = emb_dim", "return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device):", "forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len = trg.shape[0]", "2) return weighted_encoder_rep def forward(self, input, decoder_hidden, encoder_outputs): input =", "repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2)", "dec_hid_dim, dropout, attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim", "decoder, device): super().__init__() self.encoder = encoder self.decoder = decoder self.device", "trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs,", "attention = torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def", "enc_hid_dim, bidirectional = True) self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim)", "= attention 
self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim *", "= nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc = nn.Linear(enc_hid_dim *", "F from torch import Tensor class Encoder(nn.Module): def __init__(self, input_dim,", "self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim * 2) +", "dropout, attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim", "0, 2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2)))", "encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0,", "torch import Tensor class Encoder(nn.Module): def __init__(self, input_dim, emb_dim, enc_hid_dim,", "dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, output_dim, emb_dim,", "hidden[-1,:,:]), dim = 1))) return outputs, hidden class Attention(nn.Module): def", "1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder, decoder,", "1) encoder_outputs = encoder_outputs.permute(1, 0, 2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden,", "attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden =", "as optim import torch.nn.functional as F from torch import Tensor", "self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) # output of bi-directional rnn", "typing import Tuple import torch import torch.nn as nn import", "dropout self.attention = attention self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn =", "hidden = self.rnn(embedded) # output of bi-directional rnn should be", "+ emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout", "__init__(self, encoder, decoder, device): super().__init__() self.encoder = encoder self.decoder 
=", "output = output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep,", "rnn should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim =", "= output teacher_force = random.random() < teacher_forcing_ratio top1 = output.max(1)[1]", "src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs =", "# first input to the decoder is the <sos> token", "torch.cat((embedded, weighted_encoder_rep), dim = 2) output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0))", "hidden class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim", "= self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1)) return output, decoder_hidden.squeeze(0)", "class Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention):", "a = a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep =", "= self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden =", "dim = 1))) return outputs, hidden class Attention(nn.Module): def __init__(self,", "self.device = device def forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size =", "= random.random() < teacher_forcing_ratio top1 = output.max(1)[1] output = (trg[t]", "torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, output_dim,", "energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2))) attention =", "= 2))) attention = torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class", "Encoder(nn.Module): def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim", 
"self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2) output,", "input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs)", "self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0, 2)", "output of bi-directional rnn should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:],", "= nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0]", "enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim * 2) +", "dim = 2))) attention = torch.sum(energy, dim=2) return F.softmax(attention, dim=1)", "0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0,", "= nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in", "= 1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder,", "for t in range(1, max_len): output, hidden = self.decoder(output, hidden,", "as nn import torch.optim as optim import torch.nn.functional as F", "is the <sos> token output = trg[0,:] for t in", "nn.GRU((enc_hid_dim * 2) + emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in +", "encoder_outputs = encoder_outputs.permute(1, 0, 2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs),", "def forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1,", "nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs) a", "super().__init__() self.encoder = encoder self.decoder = decoder self.device = device", "= encoder_outputs.permute(1, 0, 2) 
energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim", "= encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1,", "super().__init__() self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim", "import torch.optim as optim import torch.nn.functional as F from torch", "torch.nn as nn import torch.optim as optim import torch.nn.functional as", "2) + dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden,", "encoder_outputs.permute(1, 0, 2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim =", "decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs = encoder_outputs.permute(1, 0, 2) energy =", "* 2) + dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self,", "super().__init__() self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim", "nn.Embedding(input_dim, emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc", "self.out(torch.cat((output, weighted_encoder_rep, embedded), dim = 1)) return output, decoder_hidden.squeeze(0) class", "self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def", "self.attn = nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len =", "= nn.Dropout(dropout) def forward(self, src): embedded = self.dropout(self.embedding(src)) outputs, hidden", "import torch import torch.nn as nn import torch.optim as optim", "return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim,", "def __init__(self, input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim =", "self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, 
encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim", "output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim", "encoder_outputs): input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden,", "encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep", "= enc_hid_dim self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.dropout =", "of bi-directional rnn should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]),", "input to the decoder is the <sos> token output =", "the <sos> token output = trg[0,:] for t in range(1,", "decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def __init__(self, encoder, decoder, device): super().__init__() self.encoder", "trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len = trg.shape[0] trg_vocab_size =", "self.dropout = dropout self.attention = attention self.embedding = nn.Embedding(output_dim, emb_dim)", "decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1)", "output_dim self.dropout = dropout self.attention = attention self.embedding = nn.Embedding(output_dim,", "from typing import Tuple import torch import torch.nn as nn", "bi-directional rnn should be concatenated hidden = torch.tanh(self.fc(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim", "self.dropout = nn.Dropout(dropout) def forward(self, src): embedded = self.dropout(self.embedding(src)) outputs,", "input_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout): super().__init__() self.input_dim = input_dim self.emb_dim", "outputs[t] = output teacher_force = random.random() < teacher_forcing_ratio top1 =", "device def forward(self, src, trg, teacher_forcing_ratio=0.5): 
batch_size = src.shape[1] max_len", "= nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self,", "src): embedded = self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) # output", "output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] = output teacher_force", "= torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) # first", "self.dec_hid_dim = dec_hid_dim self.output_dim = output_dim self.dropout = dropout self.attention", "* 2) + emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim,", "forward(self, input, decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input))", "+ emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs):", "# output of bi-directional rnn should be concatenated hidden =", "decoder self.device = device def forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size", "* 2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self, src): embedded", "def forward(self, src): embedded = self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded)", "hidden = self.encoder(src) # first input to the decoder is", "forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len,", "encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def forward(self,", "(enc_hid_dim * 2) + dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim) def", "output teacher_force = random.random() < teacher_forcing_ratio top1 = output.max(1)[1] output", "outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) #", "dim = 2) output, decoder_hidden = self.rnn(rnn_input, 
decoder_hidden.unsqueeze(0)) embedded =", "emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, encoder_outputs): a", "= 2) output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0)", "= self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep),", "token output = trg[0,:] for t in range(1, max_len): output,", "encoder_outputs) a = a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0, 2) weighted_encoder_rep", "dec_hid_dim self.dropout = dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn =", "weighted_encoder_rep, embedded), dim = 1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module):", "weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded), dim =", "embedded = self.dropout(self.embedding(input)) weighted_encoder_rep = self._weighted_encoder_rep(decoder_hidden, encoder_outputs) rnn_input = torch.cat((embedded,", "self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim", "def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim", "= torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self,", "= self.encoder(src) # first input to the decoder is the", "batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) # first input to", "trg[0,:] for t in range(1, max_len): output, hidden = self.decoder(output,", "2) energy = torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2))) attention", "teacher_forcing_ratio top1 = output.max(1)[1] output = (trg[t] if teacher_force else", "bidirectional = True) self.fc = nn.Linear(enc_hid_dim * 2, 
dec_hid_dim) self.dropout", "= trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs = torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device)", "attn_dim): super().__init__() self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.attn_in =", "output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim = emb_dim", "embedded = self.dropout(self.embedding(src)) outputs, hidden = self.rnn(embedded) # output of", "encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden = decoder_hidden.unsqueeze(1).repeat(1, src_len, 1) encoder_outputs", "outputs, hidden = self.rnn(embedded) # output of bi-directional rnn should", "self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def", "__init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim =", "class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim, attn_dim): super().__init__() self.enc_hid_dim =", "2))) attention = torch.sum(energy, dim=2) return F.softmax(attention, dim=1) class Decoder(nn.Module):", "emb_dim) self.rnn = nn.GRU(emb_dim, enc_hid_dim, bidirectional = True) self.fc =", "= output.squeeze(0) weighted_encoder_rep = weighted_encoder_rep.squeeze(0) output = self.out(torch.cat((output, weighted_encoder_rep, embedded),", "to the decoder is the <sos> token output = trg[0,:]", "= nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout = nn.Dropout(dropout) def _weighted_encoder_rep(self,", "= self.attention(decoder_hidden, encoder_outputs) a = a.unsqueeze(1) encoder_outputs = encoder_outputs.permute(1, 0,", "2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2)", "encoder_outputs) rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2) output, decoder_hidden", "emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim 
self.output_dim = output_dim", "self.attention = attention self.embedding = nn.Embedding(output_dim, emb_dim) self.rnn = nn.GRU((enc_hid_dim", "self.rnn(embedded) # output of bi-directional rnn should be concatenated hidden", "trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) # first input to the", "self.output_dim = output_dim self.dropout = dropout self.attention = attention self.embedding", "self.encoder(src) # first input to the decoder is the <sos>", "range(1, max_len): output, hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] =", "dec_hid_dim, dropout): super().__init__() self.input_dim = input_dim self.emb_dim = emb_dim self.enc_hid_dim", "embedded), dim = 1)) return output, decoder_hidden.squeeze(0) class Seq2Seq(nn.Module): def", "= trg[0,:] for t in range(1, max_len): output, hidden =", "2, dec_hid_dim) self.dropout = nn.Dropout(dropout) def forward(self, src): embedded =", "= dec_hid_dim self.attn_in = (enc_hid_dim * 2) + dec_hid_dim self.attn", "torch.zeros(max_len, batch_size, trg_vocab_size).to(self.device) encoder_outputs, hidden = self.encoder(src) # first input", "torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def", "2) output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output", "weighted_encoder_rep), dim = 2) output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded", "batch_size = src.shape[1] max_len = trg.shape[0] trg_vocab_size = self.decoder.output_dim outputs", "self.encoder = encoder self.decoder = decoder self.device = device def", "= output.max(1)[1] output = (trg[t] if teacher_force else top1) return", "self.decoder = decoder self.device = device def forward(self, src, trg,", "= dec_hid_dim self.dropout = dropout self.embedding = nn.Embedding(input_dim, emb_dim) self.rnn", "class Encoder(nn.Module): def __init__(self, input_dim, emb_dim, 
enc_hid_dim, dec_hid_dim, dropout): super().__init__()", "import torch.nn.functional as F from torch import Tensor class Encoder(nn.Module):", "rnn_input = torch.cat((embedded, weighted_encoder_rep), dim = 2) output, decoder_hidden =", "weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return", "def forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1] max_len =", "emb_dim, enc_hid_dim, dec_hid_dim, dropout, attention): super().__init__() self.emb_dim = emb_dim self.enc_hid_dim", "import Tuple import torch import torch.nn as nn import torch.optim", "= True) self.fc = nn.Linear(enc_hid_dim * 2, dec_hid_dim) self.dropout =", "= self.decoder(output, hidden, encoder_outputs) outputs[t] = output teacher_force = random.random()", "= torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2))) attention = torch.sum(energy,", "decoder_hidden, encoder_outputs): input = input.unsqueeze(0) embedded = self.dropout(self.embedding(input)) weighted_encoder_rep =", "output, decoder_hidden = self.rnn(rnn_input, decoder_hidden.unsqueeze(0)) embedded = embedded.squeeze(0) output =", "self.dec_hid_dim = dec_hid_dim self.attn_in = (enc_hid_dim * 2) + dec_hid_dim", "hidden = self.decoder(output, hidden, encoder_outputs) outputs[t] = output teacher_force =", "1))) return outputs, hidden class Attention(nn.Module): def __init__(self, enc_hid_dim, dec_hid_dim,", "nn.Linear(self.attn_in, attn_dim) def forward(self, decoder_hidden, encoder_outputs): src_len = encoder_outputs.shape[0] repeated_decoder_hidden", "= (enc_hid_dim * 2) + dec_hid_dim self.attn = nn.Linear(self.attn_in, attn_dim)", "emb_dim, dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout =", "encoder_outputs) outputs[t] = output teacher_force = random.random() < teacher_forcing_ratio top1", "= nn.Dropout(dropout) def _weighted_encoder_rep(self, decoder_hidden, 
encoder_outputs): a = self.attention(decoder_hidden, encoder_outputs)", "dim=1) class Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim, dropout,", "encoder_outputs), dim = 2))) attention = torch.sum(energy, dim=2) return F.softmax(attention,", "= device def forward(self, src, trg, teacher_forcing_ratio=0.5): batch_size = src.shape[1]", "t in range(1, max_len): output, hidden = self.decoder(output, hidden, encoder_outputs)", "encoder_outputs.permute(1, 0, 2) weighted_encoder_rep = torch.bmm(a, encoder_outputs) weighted_encoder_rep = weighted_encoder_rep.permute(1,", "torch.tanh(self.attn(torch.cat(( repeated_decoder_hidden, encoder_outputs), dim = 2))) attention = torch.sum(energy, dim=2)", "dec_hid_dim) self.out = nn.Linear(self.attention.attn_in + emb_dim, output_dim) self.dropout = nn.Dropout(dropout)", "def __init__(self, encoder, decoder, device): super().__init__() self.encoder = encoder self.decoder", "emb_dim self.enc_hid_dim = enc_hid_dim self.dec_hid_dim = dec_hid_dim self.dropout = dropout", "F.softmax(attention, dim=1) class Decoder(nn.Module): def __init__(self, output_dim, emb_dim, enc_hid_dim, dec_hid_dim,", "= weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def forward(self, input, decoder_hidden,", "self.attn_in = (enc_hid_dim * 2) + dec_hid_dim self.attn = nn.Linear(self.attn_in,", "weighted_encoder_rep = weighted_encoder_rep.permute(1, 0, 2) return weighted_encoder_rep def forward(self, input," ]
[ "len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source)", "self.fc = nn.Linear(hidden_size, output_size) def forward(self, x, hidden, cell): #", "input_size, embedding_size, hidden_size, output_size, num_layers, p ): super(Decoder, self).__init__() self.dropout", "num_layers = 2 enc_dropout = 0.5 dec_dropout = 0.5 #", "token x = target[0] for t in range(1, target_len): #", "= Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng, lower=True,", "output, hidden, cell = self.decoder(x, hidden, cell) # Store next", "form. For example if we have MNIST we want to", "sentence x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding shape:", "and testing time, if teacher forcing is 1 # then", "= self.rnn(embedding, (hidden, cell)) # outputs shape: (1, N, hidden_size)", "optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model:", "utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng", "loss = criterion(output, target) # Back prop loss.backward() # Clip", "output # Get the best word the Decoder predicted (index", "be completely different than what the # network is used", "# Back prop loss.backward() # Clip to avoid exploding gradient", "input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size", "* batch_size that we want to send in into #", "= target.shape[0] target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device)", "N is batch size embedding = self.dropout(self.embedding(x)) # embedding shape:", "epoch in range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint = {\"state_dict\":", "import random from 
torch.utils.tensorboard import SummaryWriter # to print to", "embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self, x):", "sending in a single word and not a sentence x", "cell) = self.rnn(embedding, (hidden, cell)) # outputs shape: (1, N,", "cell) # Store next output prediction outputs[t] = output #", "model(inp_data, target) # Output is of shape (trg_len, batch_size, output_dim)", "into # our cost function, so we need to do", "take the actual next word # otherwise we take the", "for batch_idx, batch in enumerate(train_iterator): # Get input and targets", "exts=(\".de\", \".en\"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000,", "(hidden, cell)) # outputs shape: (1, N, hidden_size) predictions =", "Plot to tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step) step += 1", "are sending in a single word and not a sentence", "in a single word and not a sentence x =", ") print(f\"Translated example sentence: \\n {translated_sentence}\") model.train() for batch_idx, batch", "want it to be (1, N), seq_length # is 1", "# Training hyperparameters num_epochs = 100 learning_rate = 0.001 batch_size", "= SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits(", "import SummaryWriter # to print to tensorboard from utils import", "N, hidden_size) return hidden, cell class Decoder(nn.Module): def __init__( self,", "hidden_size, output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer", "tensorboard from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger =", "if teacher forcing is 1 # then inputs at test", "Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\",", "it to # loss 
function we want it to be", "super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers =", "is 1 # then inputs at test time might be", "100 learning_rate = 0.001 batch_size = 64 # Model hyperparameters", "männern darauf wird von einem großen pferdegespann ans ufer gezogen.\"", "shape: (1, N, length_target_vocabulary) to send it to # loss", "nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size) def forward(self,", "= BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src),", "= self.encoder(source) # Grab the first input to the Decoder", "self.fc(outputs) # predictions shape: (1, N, length_target_vocabulary) to send it", "the actual next word # otherwise we take the word", "start token while we're at it output = output[1:].reshape(-1, output.shape[2])", "300 decoder_embedding_size = 300 hidden_size = 1024 # Needs to", "time, if teacher forcing is 1 # then inputs at", "return hidden, cell class Decoder(nn.Module): def __init__( self, input_size, embedding_size,", "loss.backward() # Clip to avoid exploding gradient issues, makes sure", "gradient issues, makes sure grads are # within a healthy", "input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder,", "save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def tokenize_ger(text):", "import Field, BucketIterator import numpy as np import spacy import", "# x shape: (seq_length, N) where N is batch size", "what the # network is used to. 
This was a", "shape: (seq_length, N, hidden_size) return hidden, cell class Decoder(nn.Module): def", "print(f\"Translated example sentence: \\n {translated_sentence}\") model.train() for batch_idx, batch in", "lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data, test_data = Multi30k.splits( exts=(\".de\",", "from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\")", "torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") input_size_encoder = len(german.vocab) input_size_decoder =", "hidden_size) return hidden, cell class Decoder(nn.Module): def __init__( self, input_size,", "torch.optim as optim from torchtext.datasets import Multi30k from torchtext.data import", "a sentence x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding", "be. # Teacher Forcing is used so that the model", "großen pferdegespann ans ufer gezogen.\" for epoch in range(num_epochs): print(f\"[Epoch", "healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step optimizer.step() #", "for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for tok", "spacy import random from torch.utils.tensorboard import SummaryWriter # to print", "function we want it to be (N, length_target_vocabulary) so we're", "mit mehreren männern darauf wird von einem großen pferdegespann ans", "{\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model,", "dim predictions = predictions.squeeze(0) return predictions, hidden, cell class Seq2Seq(nn.Module):", "torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step optimizer.step() # Plot to", "def tokenize_eng(text): return [tok.text for tok in spacy_eng.tokenizer(text)] german =", "will be <SOS> token x = target[0] for t in", "# Forward prop output = model(inp_data, 
target) # Output is", "prop output = model(inp_data, target) # Output is of shape", "german, english, device, max_length=50 ) print(f\"Translated example sentence: \\n {translated_sentence}\")", "max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self, input_size,", "# outputs shape: (seq_length, N, hidden_size) return hidden, cell class", "and not a sentence x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x))", "be: (N, 10) and targets just (N). Here we can", "target = batch.trg.to(device) # Forward prop output = model(inp_data, target)", "self.hidden_size = hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size)", "embedding = self.dropout(self.embedding(x)) # embedding shape: (1, N, embedding_size) outputs,", "for epoch in range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint =", "range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step optimizer.step() # Plot", "for tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\")", "= english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer)", "we want to send in into # our cost function,", "x shape: (N) where N is for batch size, we", "encoder, decoder): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder", "< teacher_force_ratio else best_guess return outputs ### We're ready to", "time might be completely different than what the # network", "tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for tok in", "be <SOS> token x = target[0] for t in range(1,", "to tensorboard from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger", "= 1024 # Needs to be the same for both", "numpy 
as np import spacy import random from torch.utils.tensorboard import", "target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output, target) # Back prop loss.backward()", "the Decoder which will be <SOS> token x = target[0]", "(index in the vocabulary) best_guess = output.argmax(1) # With probability", "nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self,", "might be completely different than what the # network is", "else \"cpu\") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size =", "take the word that the Decoder predicted it to be.", "__init__(self, input_size, embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout =", "different than what the # network is used to. This", "test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net =", "torch.nn as nn import torch.optim as optim from torchtext.datasets import", "size embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, embedding_size)", "= len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size =", "optimizer) sentence = \"ein boot mit mehreren männern darauf wird", "teacher_force_ratio we take the actual next word # otherwise we", "translated_sentence = translate_sentence( model, sentence, german, english, device, max_length=50 )", "inp_data = batch.src.to(device) target = batch.trg.to(device) # Forward prop output", "example if we have MNIST we want to have #", "(hidden, cell) = self.rnn(embedding, (hidden, cell)) # outputs shape: (1,", "num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size) def forward(self, x, hidden,", "# our cost function, so we need to do some", "model.eval() translated_sentence = translate_sentence( model, sentence, german, english, device, max_length=50", "and get to cuda inp_data = 
batch.src.to(device) target = batch.trg.to(device)", "# Get input and targets and get to cuda inp_data", "first dim predictions = predictions.squeeze(0) return predictions, hidden, cell class", "of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss #", "batch.src.to(device) target = batch.trg.to(device) # Forward prop output = model(inp_data,", "# doesn't take input in that form. For example if", "Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder(", "hidden, cell class Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__()", "target) # Back prop loss.backward() # Clip to avoid exploding", "embedding_size) outputs, (hidden, cell) = self.rnn(embedding) # outputs shape: (seq_length,", "def forward(self, x, hidden, cell): # x shape: (N) where", "as optim from torchtext.datasets import Multi30k from torchtext.data import Field,", "we take the word that the Decoder predicted it to", "max_length=50 ) print(f\"Translated example sentence: \\n {translated_sentence}\") model.train() for batch_idx,", "predicted it to be. 
# Teacher Forcing is used so", "shape: (N) where N is for batch size, we want", "(1, N, hidden_size) predictions = self.fc(outputs) # predictions shape: (1,", "load_model = False device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")", "tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data, test_data = Multi30k.splits(", "import numpy as np import spacy import random from torch.utils.tensorboard", "enc_dropout = 0.5 dec_dropout = 0.5 # Tensorboard to get", "source, target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len = target.shape[0] target_vocab_size", "we want it to be (N, length_target_vocabulary) so we're #", "mehreren männern darauf wird von einem großen pferdegespann ans ufer", "len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300", "### # Training hyperparameters num_epochs = 100 learning_rate = 0.001", "tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step) step += 1 score =", "our cost function, so we need to do some reshapin.", "self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p)", "Loss # doesn't take input in that form. 
For example", "hidden_size, output_size, num_layers, p ): super(Decoder, self).__init__() self.dropout = nn.Dropout(p)", "x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding shape: (1, N, embedding_size)", "# Get the best word the Decoder predicted (index in", "init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data, test_data = Multi30k.splits( exts=(\".de\", \".en\"),", "gezogen.\" for epoch in range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint", "gonna remove the first dim predictions = predictions.squeeze(0) return predictions,", "\"ein boot mit mehreren männern darauf wird von einem großen", "model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence,", "loss\", loss, global_step=step) step += 1 score = bleu(test_data[1:100], model,", "self.encoder = encoder self.decoder = decoder def forward(self, source, target,", "word and not a sentence x = x.unsqueeze(0) embedding =", "send it to # loss function we want it to", "lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\"", "Let's also remove the start token while we're at it", "= encoder self.decoder = decoder def forward(self, source, target, teacher_force_ratio=0.5):", "nn.Linear(hidden_size, output_size) def forward(self, x, hidden, cell): # x shape:", "return outputs ### We're ready to define everything we need", "cell = self.encoder(source) # Grab the first input to the", "= 300 decoder_embedding_size = 300 hidden_size = 1024 # Needs", "RNN's num_layers = 2 enc_dropout = 0.5 dec_dropout = 0.5", "encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size,", "x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding shape: (1,", "model = Seq2Seq(encoder_net, 
decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx =", "also remove the start token while we're at it output", "(hidden, cell) = self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size)", "= self.fc(outputs) # predictions shape: (1, N, length_target_vocabulary) to send", "word the Decoder predicted (index in the vocabulary) best_guess =", "we want it to be (1, N), seq_length # is", "import torch.optim as optim from torchtext.datasets import Multi30k from torchtext.data", "target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len = target.shape[0] target_vocab_size =", "target_len): # Use previous hidden, cell as context from encoder", "Back prop loss.backward() # Clip to avoid exploding gradient issues,", "# to print to tensorboard from utils import translate_sentence, bleu,", "be (1, N), seq_length # is 1 here because we", "model ### # Training hyperparameters num_epochs = 100 learning_rate =", "max_norm=1) # Gradient descent step optimizer.step() # Plot to tensorboard", "batch size embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length, N,", "num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(),", "at test time might be completely different than what the", "so we need to do some reshapin. 
While we're at", "= self.decoder(x, hidden, cell) # Store next output prediction outputs[t]", "# With probability of teacher_force_ratio we take the actual next", "= len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size =", "similar inputs at training and testing time, if teacher forcing", "embedding shape: (seq_length, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding)", "testing time, if teacher forcing is 1 # then inputs", "np import spacy import random from torch.utils.tensorboard import SummaryWriter #", ").to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx", "Get input and targets and get to cuda inp_data =", "False device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") input_size_encoder =", "translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng = spacy.load(\"en\")", "previous hidden, cell as context from encoder at start output,", "target.shape[0] target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden,", "way that we have output_words * batch_size that we want", "decoder_embedding_size = 300 hidden_size = 1024 # Needs to be", "= self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, embedding_size) outputs, (hidden,", "cell as context from encoder at start output, hidden, cell", "BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device,", "decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion =", "def tokenize_ger(text): return [tok.text for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text):", "in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, 
init_token=\"<sos>\", eos_token=\"<eos>\") english =", "input_size, embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p)", "be (N, length_target_vocabulary) so we're # just gonna remove the", "do some reshapin. While we're at it # Let's also", "want to send in into # our cost function, so", "Use previous hidden, cell as context from encoder at start", "start output, hidden, cell = self.decoder(x, hidden, cell) # Store", "hyperparameters num_epochs = 100 learning_rate = 0.001 batch_size = 64", "sentence, german, english, device, max_length=50 ) print(f\"Translated example sentence: \\n", "Decoder predicted (index in the vocabulary) best_guess = output.argmax(1) #", "# then inputs at test time might be completely different", "x): # x shape: (seq_length, N) where N is batch", "= translate_sentence( model, sentence, german, english, device, max_length=50 ) print(f\"Translated", "english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence", "nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size,", "at it # Let's also remove the start token while", "spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def tokenize_ger(text): return [tok.text for tok", "torchtext.datasets import Multi30k from torchtext.data import Field, BucketIterator import numpy", "load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence = \"ein boot mit mehreren", "batch.trg.to(device) # Forward prop output = model(inp_data, target) # Output", "we need to do some reshapin. While we're at it", "to be: (N, 10) and targets just (N). 
Here we", "= criterion(output, target) # Back prop loss.backward() # Clip to", "0.5 dec_dropout = 0.5 # Tensorboard to get nice loss", "hidden, cell as context from encoder at start output, hidden,", "0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size,", "self.encoder(source) # Grab the first input to the Decoder which", "target_len = target.shape[0] target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len, batch_size,", "# just gonna remove the first dim predictions = predictions.squeeze(0)", "self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers", "cell class Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder", "outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source) #", "torch.utils.tensorboard import SummaryWriter # to print to tensorboard from utils", "translate_sentence( model, sentence, german, english, device, max_length=50 ) print(f\"Translated example", "remove the start token while we're at it output =", "step optimizer.step() # Plot to tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step)", "\"cpu\") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab)", "Store next output prediction outputs[t] = output # Get the", "target[t] if random.random() < teacher_force_ratio else best_guess return outputs ###", "(train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, )", "to. This was a long comment. 
x = target[t] if", "where N is for batch size, we want it to", "in range(1, target_len): # Use previous hidden, cell as context", "# predictions shape: (1, N, length_target_vocabulary) to send it to", "64 # Model hyperparameters load_model = False device = torch.device(\"cuda\"", "load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def tokenize_ger(text): return", "Tensorboard to get nice loss plot writer = SummaryWriter(f\"runs/loss_plot\") step", "output = output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad() loss =", "output to be: (N, 10) and targets just (N). Here", "encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size = 1024 #", "self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size)", "from encoder at start output, hidden, cell = self.decoder(x, hidden,", "(N). Here we can view it in a similar #", "= nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers self.embedding =", "def __init__( self, input_size, embedding_size, hidden_size, output_size, num_layers, p ):", "N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding) # outputs shape:", "(1, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell))", "next word # otherwise we take the word that the", "a similar # way that we have output_words * batch_size", "# similar inputs at training and testing time, if teacher", "comment. 
x = target[t] if random.random() < teacher_force_ratio else best_guess", "= target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output, target) # Back prop", "self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size,", "output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output, target) #", "vocabulary) best_guess = output.argmax(1) # With probability of teacher_force_ratio we", "our Seq2Seq model ### # Training hyperparameters num_epochs = 100", "We're ready to define everything we need for training our", "writer = SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator, valid_iterator, test_iterator =", "in enumerate(train_iterator): # Get input and targets and get to", "the model gets used to seeing # similar inputs at", "= hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn", "max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers,", "the Decoder predicted (index in the vocabulary) best_guess = output.argmax(1)", "spacy_ger = spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def tokenize_ger(text): return [tok.text", "completely different than what the # network is used to.", "global_step=step) step += 1 score = bleu(test_data[1:100], model, german, english,", "outputs shape: (1, N, hidden_size) predictions = self.fc(outputs) # predictions", "= source.shape[1] target_len = target.shape[0] target_vocab_size = len(english.vocab) outputs =", "batch_size = source.shape[1] target_len = target.shape[0] target_vocab_size = len(english.vocab) outputs", "we have MNIST we want to have # output to", "than what the # network is used to. 
This was", "= predictions.squeeze(0) return predictions, hidden, cell class Seq2Seq(nn.Module): def __init__(self,", "{epoch} / {num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint)", "function, so we need to do some reshapin. While we're", "[tok.text for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for", "to define everything we need for training our Seq2Seq model", "the word that the Decoder predicted it to be. #", "is used so that the model gets used to seeing", "makes sure grads are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(),", "test time might be completely different than what the #", "predictions.squeeze(0) return predictions, hidden, cell class Seq2Seq(nn.Module): def __init__(self, encoder,", "(N, 10) and targets just (N). Here we can view", "\"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence, german,", "it output = output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad() loss", "that form. 
For example if we have MNIST we want", "# Gradient descent step optimizer.step() # Plot to tensorboard writer.add_scalar(\"Training", "torchtext.data import Field, BucketIterator import numpy as np import spacy", "to tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step) step += 1 score", "target = target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output, target) # Back", "english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module):", "are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient", "shape: (seq_length, N) where N is batch size embedding =", "input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net,", "valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net", "criterion(output, target) # Back prop loss.backward() # Clip to avoid", "is batch size embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length,", "inputs at test time might be completely different than what", "it to be. 
# Teacher Forcing is used so that", "we need for training our Seq2Seq model ### # Training", "def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder", "self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers = num_layers self.embedding", "target[0] for t in range(1, target_len): # Use previous hidden,", "because we are sending in a single word and not", "= 0.5 dec_dropout = 0.5 # Tensorboard to get nice", "= Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2)", "N, length_target_vocabulary) to send it to # loss function we", "for batch size, we want it to be (1, N),", "BucketIterator import numpy as np import spacy import random from", "which will be <SOS> token x = target[0] for t", "cuda inp_data = batch.src.to(device) target = batch.trg.to(device) # Forward prop", "torch import torch.nn as nn import torch.optim as optim from", "hidden, cell = self.decoder(x, hidden, cell) # Store next output", "at it output = output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad()", "remove the first dim predictions = predictions.squeeze(0) return predictions, hidden,", "targets just (N). Here we can view it in a", "min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self, input_size, embedding_size,", "return [tok.text for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text", ") train_data, valid_data, test_data = Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english)", "+= 1 score = bleu(test_data[1:100], model, german, english, device) print(f\"Bleu", "here because we are sending in a single word and", "the first input to the Decoder which will be <SOS>", "batch size, we want it to be (1, N), seq_length", "outputs[t] = output # Get the best word the Decoder", "This was a long comment. 
x = target[t] if random.random()", "in range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(),", "Seq2Seq model ### # Training hyperparameters num_epochs = 100 learning_rate", "send in into # our cost function, so we need", "def forward(self, x): # x shape: (seq_length, N) where N", "1 # then inputs at test time might be completely", "targets and get to cuda inp_data = batch.src.to(device) target =", "decoder def forward(self, source, target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len", "define everything we need for training our Seq2Seq model ###", "outputs, (hidden, cell) = self.rnn(embedding) # outputs shape: (seq_length, N,", "length_target_vocabulary) so we're # just gonna remove the first dim", "# Grab the first input to the Decoder which will", "to cuda inp_data = batch.src.to(device) target = batch.trg.to(device) # Forward", "that we have output_words * batch_size that we want to", "if random.random() < teacher_force_ratio else best_guess return outputs ### We're", "to be (1, N), seq_length # is 1 here because", "it to be (N, length_target_vocabulary) so we're # just gonna", "the start token while we're at it output = output[1:].reshape(-1,", "dec_dropout = 0.5 # Tensorboard to get nice loss plot", "= target[t] if random.random() < teacher_force_ratio else best_guess return outputs", "used to seeing # similar inputs at training and testing", "0.001 batch_size = 64 # Model hyperparameters load_model = False", "decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device)", "pferdegespann ans ufer gezogen.\" for epoch in range(num_epochs): print(f\"[Epoch {epoch}", "the # network is used to. 
This was a long", "spacy.load(\"en\") def tokenize_ger(text): return [tok.text for tok in spacy_ger.tokenizer(text)] def", "### We're ready to define everything we need for training", "ufer gezogen.\" for epoch in range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\")", "a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step optimizer.step()", "hidden_size = 1024 # Needs to be the same for", "[tok.text for tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\",", "get nice loss plot writer = SummaryWriter(f\"runs/loss_plot\") step = 0", "300 hidden_size = 1024 # Needs to be the same", "= 300 hidden_size = 1024 # Needs to be the", "= False device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") input_size_encoder", "= batch.trg.to(device) # Forward prop output = model(inp_data, target) #", "in that form. For example if we have MNIST we", "score = bleu(test_data[1:100], model, german, english, device) print(f\"Bleu score {score*100:.2f}\")", "num_layers, dropout=p) def forward(self, x): # x shape: (seq_length, N)", "len(english.vocab) output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300", "sentence: \\n {translated_sentence}\") model.train() for batch_idx, batch in enumerate(train_iterator): #", "to send in into # our cost function, so we", "Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model =", "step = 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data,", "for training our Seq2Seq model ### # Training hyperparameters num_epochs", "similar # way that we have output_words * batch_size that", "ans ufer gezogen.\" for epoch in range(num_epochs): print(f\"[Epoch {epoch} /", "and targets just (N). 
Here we can view it in", "self, input_size, embedding_size, hidden_size, output_size, num_layers, p ): super(Decoder, self).__init__()", "= 2 enc_dropout = 0.5 dec_dropout = 0.5 # Tensorboard", "we're at it # Let's also remove the start token", "english = Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data,", "spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english = Field(", "def forward(self, source, target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len =", "batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source) # Grab the first", "# Use previous hidden, cell as context from encoder at", "= model(inp_data, target) # Output is of shape (trg_len, batch_size,", "num_layers, p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size", "# is 1 here because we are sending in a", "best_guess = output.argmax(1) # With probability of teacher_force_ratio we take", "is used to. This was a long comment. x =", "from torchtext.data import Field, BucketIterator import numpy as np import", "self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, embedding_size) outputs, (hidden, cell)", "is 1 here because we are sending in a single", "doesn't take input in that form. For example if we", "self.rnn(embedding, (hidden, cell)) # outputs shape: (1, N, hidden_size) predictions", "# within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent", "# x shape: (N) where N is for batch size,", "reshapin. 
While we're at it # Let's also remove the", "# Tensorboard to get nice loss plot writer = SummaryWriter(f\"runs/loss_plot\")", "optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx)", ").to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout,", "= batch.src.to(device) target = batch.trg.to(device) # Forward prop output =", "p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers", "dropout=p) self.fc = nn.Linear(hidden_size, output_size) def forward(self, x, hidden, cell):", "self).__init__() self.encoder = encoder self.decoder = decoder def forward(self, source,", "Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder = encoder", "\".en\"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2)", "we are sending in a single word and not a", "__init__( self, input_size, embedding_size, hidden_size, output_size, num_layers, p ): super(Decoder,", "criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence =", "as context from encoder at start output, hidden, cell =", "Decoder predicted it to be. 
# Teacher Forcing is used", "to avoid exploding gradient issues, makes sure grads are #", "With probability of teacher_force_ratio we take the actual next word", "# Store next output prediction outputs[t] = output # Get", "predictions shape: (1, N, length_target_vocabulary) to send it to #", "# output to be: (N, 10) and targets just (N).", "if torch.cuda.is_available() else \"cpu\") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab)", "= torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source) # Grab", "# Teacher Forcing is used so that the model gets", "some reshapin. While we're at it # Let's also remove", "hidden, cell): # x shape: (N) where N is for", "N) where N is batch size embedding = self.dropout(self.embedding(x)) #", "bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def", "x: len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size,", "model, optimizer) sentence = \"ein boot mit mehreren männern darauf", "= Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"]", "english, device, max_length=50 ) print(f\"Translated example sentence: \\n {translated_sentence}\") model.train()", "encoder self.decoder = decoder def forward(self, source, target, teacher_force_ratio=0.5): batch_size", "nice loss plot writer = SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator,", "example sentence: \\n {translated_sentence}\") model.train() for batch_idx, batch in enumerate(train_iterator):", "<filename>ML/Pytorch/more_advanced/Seq2Seq/seq2seq.py<gh_stars>1000+ import torch import torch.nn as nn import torch.optim as", "the vocabulary) best_guess = output.argmax(1) # With probability of teacher_force_ratio", "sort_key=lambda x: len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder, 
encoder_embedding_size,", "predicted (index in the vocabulary) best_guess = output.argmax(1) # With", "Cross Entropy Loss # doesn't take input in that form.", "hidden_size) predictions = self.fc(outputs) # predictions shape: (1, N, length_target_vocabulary)", "embedding = self.dropout(self.embedding(x)) # embedding shape: (seq_length, N, embedding_size) outputs,", "teacher_force_ratio=0.5): batch_size = source.shape[1] target_len = target.shape[0] target_vocab_size = len(english.vocab)", "N, hidden_size) predictions = self.fc(outputs) # predictions shape: (1, N,", "(1, N, length_target_vocabulary) to send it to # loss function", "output prediction outputs[t] = output # Get the best word", "both RNN's num_layers = 2 enc_dropout = 0.5 dec_dropout =", "german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self,", "= len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size =", "loss plot writer = SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator, valid_iterator,", "= nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc", "issues, makes sure grads are # within a healthy range", "= nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def", "= self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size) return hidden,", "shape: (seq_length, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding) #", "super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder def forward(self,", "source.shape[1] target_len = target.shape[0] target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len,", "output.argmax(1) # With probability of teacher_force_ratio we take the actual", "of teacher_force_ratio we take the actual next word # otherwise", "we're 
at it output = output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1)", "super(Decoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers =", "output_size) def forward(self, x, hidden, cell): # x shape: (N)", "as np import spacy import random from torch.utils.tensorboard import SummaryWriter", "test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda x:", "= 64 # Model hyperparameters load_model = False device =", "print to tensorboard from utils import translate_sentence, bleu, save_checkpoint, load_checkpoint", "(N) where N is for batch size, we want it", "Teacher Forcing is used so that the model gets used", "target) # Output is of shape (trg_len, batch_size, output_dim) but", "Gradient descent step optimizer.step() # Plot to tensorboard writer.add_scalar(\"Training loss\",", "grads are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) #", "a single word and not a sentence x = x.unsqueeze(0)", "length_target_vocabulary) to send it to # loss function we want", "(N, length_target_vocabulary) so we're # just gonna remove the first", "# loss function we want it to be (N, length_target_vocabulary)", "for t in range(1, target_len): # Use previous hidden, cell", "# Plot to tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step) step +=", "else best_guess return outputs ### We're ready to define everything", "nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence = \"ein boot", "otherwise we take the word that the Decoder predicted it", "Get the best word the Decoder predicted (index in the", "teacher_force_ratio else best_guess return outputs ### We're ready to define", "tokenize_eng(text): return [tok.text for tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger,", "For 
example if we have MNIST we want to have", "we can view it in a similar # way that", "in a similar # way that we have output_words *", "next output prediction outputs[t] = output # Get the best", "torch.cuda.is_available() else \"cpu\") input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size", "import torch import torch.nn as nn import torch.optim as optim", "same for both RNN's num_layers = 2 enc_dropout = 0.5", "eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data,", "hidden, cell class Decoder(nn.Module): def __init__( self, input_size, embedding_size, hidden_size,", "Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data,", "pad_idx = english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model,", "p ): super(Decoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size", "was a long comment. 
x = target[t] if random.random() <", "is for batch size, we want it to be (1,", "valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True, sort_key=lambda", "darauf wird von einem großen pferdegespann ans ufer gezogen.\" for", "output_words * batch_size that we want to send in into", "used so that the model gets used to seeing #", "(seq_length, N) where N is batch size embedding = self.dropout(self.embedding(x))", "and targets and get to cuda inp_data = batch.src.to(device) target", "hidden_size, num_layers, dropout=p) def forward(self, x): # x shape: (seq_length,", "hidden_size, num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size,", "optimizer.step() # Plot to tensorboard writer.add_scalar(\"Training loss\", loss, global_step=step) step", "embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size,", "loss function we want it to be (N, length_target_vocabulary) so", "the first dim predictions = predictions.squeeze(0) return predictions, hidden, cell", "MNIST we want to have # output to be: (N,", "= output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) optimizer.zero_grad() loss = criterion(output,", "model.train() for batch_idx, batch in enumerate(train_iterator): # Get input and", "Field, BucketIterator import numpy as np import spacy import random", "everything we need for training our Seq2Seq model ### #", "model gets used to seeing # similar inputs at training", "batch_idx, batch in enumerate(train_iterator): # Get input and targets and", "to # loss function we want it to be (N,", "view it in a similar # way that we have", "embedding_size) outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell)) # outputs", "cell)) # outputs shape: (1, N, hidden_size) predictions = self.fc(outputs)", "output_size, num_layers, p ): super(Decoder, 
self).__init__() self.dropout = nn.Dropout(p) self.hidden_size", "= output # Get the best word the Decoder predicted", "optimizer.zero_grad() loss = criterion(output, target) # Back prop loss.backward() #", "long comment. x = target[t] if random.random() < teacher_force_ratio else", "outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell)) # outputs shape:", "num_layers, enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size,", "output_size, num_layers, dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer =", "# otherwise we take the word that the Decoder predicted", "to print to tensorboard from utils import translate_sentence, bleu, save_checkpoint,", "learning_rate = 0.001 batch_size = 64 # Model hyperparameters load_model", "range(num_epochs): print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\":", "= len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell =", "encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net", "outputs shape: (seq_length, N, hidden_size) return hidden, cell class Decoder(nn.Module):", "training our Seq2Seq model ### # Training hyperparameters num_epochs =", "len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size = 1024", "wird von einem großen pferdegespann ans ufer gezogen.\" for epoch", "0.5 # Tensorboard to get nice loss plot writer =", "batch in enumerate(train_iterator): # Get input and targets and get", "german = Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng,", "output_dim) but Cross Entropy Loss # doesn't take input in", "network is used to. This was a long comment. 
x", "target_vocab_size).to(device) hidden, cell = self.encoder(source) # Grab the first input", "init_token=\"<sos>\", eos_token=\"<eos>\") english = Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" )", "device, max_length=50 ) print(f\"Translated example sentence: \\n {translated_sentence}\") model.train() for", "x, hidden, cell): # x shape: (N) where N is", "= optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if", "embedding shape: (1, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding,", "= 0.5 # Tensorboard to get nice loss plot writer", "have output_words * batch_size that we want to send in", "einem großen pferdegespann ans ufer gezogen.\" for epoch in range(num_epochs):", "self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size) return hidden, cell", "min_freq=2) class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers, p):", "from torch.utils.tensorboard import SummaryWriter # to print to tensorboard from", "import translate_sentence, bleu, save_checkpoint, load_checkpoint spacy_ger = spacy.load(\"de\") spacy_eng =", "__init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder =", "random.random() < teacher_force_ratio else best_guess return outputs ### We're ready", "ready to define everything we need for training our Seq2Seq", "enc_dropout ).to(device) decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers,", "input in that form. For example if we have MNIST", "to do some reshapin. 
While we're at it # Let's", "exploding gradient issues, makes sure grads are # within a", "shape: (1, N, hidden_size) predictions = self.fc(outputs) # predictions shape:", "(seq_length, N, hidden_size) return hidden, cell class Decoder(nn.Module): def __init__(", "print(f\"[Epoch {epoch} / {num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()}", "decoder): super(Seq2Seq, self).__init__() self.encoder = encoder self.decoder = decoder def", "# embedding shape: (seq_length, N, embedding_size) outputs, (hidden, cell) =", "it in a similar # way that we have output_words", "just (N). Here we can view it in a similar", "in the vocabulary) best_guess = output.argmax(1) # With probability of", "(seq_length, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding) # outputs", "class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers, p): super(Encoder,", "decoder_net = Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device)", "shape (trg_len, batch_size, output_dim) but Cross Entropy Loss # doesn't", "from torchtext.datasets import Multi30k from torchtext.data import Field, BucketIterator import", "output_size = len(english.vocab) encoder_embedding_size = 300 decoder_embedding_size = 300 hidden_size", "the Decoder predicted it to be. 
# Teacher Forcing is", "load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence = \"ein boot mit mehreren männern", "checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence =", "that we want to send in into # our cost", "= num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size,", "for both RNN's num_layers = 2 enc_dropout = 0.5 dec_dropout", "plot writer = SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator, valid_iterator, test_iterator", "def __init__(self, input_size, embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout", "Decoder which will be <SOS> token x = target[0] for", "to send it to # loss function we want it", "class Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq, self).__init__() self.encoder =", "N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding, (hidden, cell)) #", "seq_length # is 1 here because we are sending in", "Needs to be the same for both RNN's num_layers =", "it # Let's also remove the start token while we're", "prediction outputs[t] = output # Get the best word the", "take input in that form. For example if we have", "nn import torch.optim as optim from torchtext.datasets import Multi30k from", ") german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def", "1024 # Needs to be the same for both RNN's", "step += 1 score = bleu(test_data[1:100], model, german, english, device)", "size, we want it to be (1, N), seq_length #", "10) and targets just (N). 
Here we can view it", "batch_size = 64 # Model hyperparameters load_model = False device", "import Multi30k from torchtext.data import Field, BucketIterator import numpy as", "= nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self, x): # x", "but Cross Entropy Loss # doesn't take input in that", "hidden, cell) # Store next output prediction outputs[t] = output", "= torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") input_size_encoder = len(german.vocab) input_size_decoder", "1 score = bleu(test_data[1:100], model, german, english, device) print(f\"Bleu score", "the same for both RNN's num_layers = 2 enc_dropout =", "want it to be (N, length_target_vocabulary) so we're # just", "Entropy Loss # doesn't take input in that form. For", "device=device, ) encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout", "Output is of shape (trg_len, batch_size, output_dim) but Cross Entropy", "is of shape (trg_len, batch_size, output_dim) but Cross Entropy Loss", "SummaryWriter(f\"runs/loss_plot\") step = 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data,", "= spacy.load(\"de\") spacy_eng = spacy.load(\"en\") def tokenize_ger(text): return [tok.text for", "hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size) def forward(self, x,", "import spacy import random from torch.utils.tensorboard import SummaryWriter # to", "a long comment. 
x = target[t] if random.random() < teacher_force_ratio", "seeing # similar inputs at training and testing time, if", "Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__()", "/ {num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval()", "word # otherwise we take the word that the Decoder", "in spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for tok in spacy_eng.tokenizer(text)]", "have MNIST we want to have # output to be:", "valid_data, test_data = Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english) ) german.build_vocab(train_data,", "num_epochs = 100 learning_rate = 0.001 batch_size = 64 #", "hidden, cell = self.encoder(source) # Grab the first input to", "gets used to seeing # similar inputs at training and", "= x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) # embedding shape: (1, N,", "we want to have # output to be: (N, 10)", "that the model gets used to seeing # similar inputs", "torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell = self.encoder(source) # Grab the", "# Let's also remove the start token while we're at", "While we're at it # Let's also remove the start", "= 100 learning_rate = 0.001 batch_size = 64 # Model", "nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self, x): # x shape:", "first input to the Decoder which will be <SOS> token", "t in range(1, target_len): # Use previous hidden, cell as", ") encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device)", "need for training our Seq2Seq model ### # Training hyperparameters", "to be (N, length_target_vocabulary) so we're # just gonna remove", "embedding_size, hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size", "be the same for 
both RNN's num_layers = 2 enc_dropout", "can view it in a similar # way that we", "model, sentence, german, english, device, max_length=50 ) print(f\"Translated example sentence:", "num_layers, p ): super(Decoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size =", "predictions, hidden, cell class Seq2Seq(nn.Module): def __init__(self, encoder, decoder): super(Seq2Seq,", "= Decoder( input_size_decoder, decoder_embedding_size, hidden_size, output_size, num_layers, dec_dropout, ).to(device) model", "N is for batch size, we want it to be", "random from torch.utils.tensorboard import SummaryWriter # to print to tensorboard", "hidden_size, num_layers, p): super(Encoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size =", "best word the Decoder predicted (index in the vocabulary) best_guess", "batch_size that we want to send in into # our", "Forcing is used so that the model gets used to", "batch_size=batch_size, sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net = Encoder(", "enumerate(train_iterator): # Get input and targets and get to cuda", "if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence = \"ein boot mit", "tokenize_ger(text): return [tok.text for tok in spacy_ger.tokenizer(text)] def tokenize_eng(text): return", "dropout=p) def forward(self, x): # x shape: (seq_length, N) where", "self.dropout(self.embedding(x)) # embedding shape: (1, N, embedding_size) outputs, (hidden, cell)", "predictions = self.fc(outputs) # predictions shape: (1, N, length_target_vocabulary) to", "to seeing # similar inputs at training and testing time,", "# way that we have output_words * batch_size that we", "to get nice loss plot writer = SummaryWriter(f\"runs/loss_plot\") step =", "Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data, test_data =", "prop loss.backward() # Clip to avoid exploding gradient issues, makes", 
"save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence, german, english, device,", "target_vocab_size = len(english.vocab) outputs = torch.zeros(target_len, batch_size, target_vocab_size).to(device) hidden, cell", "that the Decoder predicted it to be. # Teacher Forcing", "Multi30k from torchtext.data import Field, BucketIterator import numpy as np", "Here we can view it in a similar # way", "cell class Decoder(nn.Module): def __init__( self, input_size, embedding_size, hidden_size, output_size,", "while we're at it output = output[1:].reshape(-1, output.shape[2]) target =", "in into # our cost function, so we need to", "training and testing time, if teacher forcing is 1 #", "need to do some reshapin. While we're at it #", "= 0 train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data),", "SummaryWriter # to print to tensorboard from utils import translate_sentence,", "context from encoder at start output, hidden, cell = self.decoder(x,", "hyperparameters load_model = False device = torch.device(\"cuda\" if torch.cuda.is_available() else", "sort_within_batch=True, sort_key=lambda x: len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder,", "nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc =", "at start output, hidden, cell = self.decoder(x, hidden, cell) #", "to be. # Teacher Forcing is used so that the", "cell): # x shape: (N) where N is for batch", "# network is used to. This was a long comment.", "x = target[t] if random.random() < teacher_force_ratio else best_guess return", "we take the actual next word # otherwise we take", "cost function, so we need to do some reshapin. 
While", "descent step optimizer.step() # Plot to tensorboard writer.add_scalar(\"Training loss\", loss,", "embedding_size, hidden_size, output_size, num_layers, p ): super(Decoder, self).__init__() self.dropout =", "train_iterator, valid_iterator, test_iterator = BucketIterator.splits( (train_data, valid_data, test_data), batch_size=batch_size, sort_within_batch=True,", "= decoder def forward(self, source, target, teacher_force_ratio=0.5): batch_size = source.shape[1]", "input and targets and get to cuda inp_data = batch.src.to(device)", "then inputs at test time might be completely different than", "Grab the first input to the Decoder which will be", "): super(Decoder, self).__init__() self.dropout = nn.Dropout(p) self.hidden_size = hidden_size self.num_layers", "N), seq_length # is 1 here because we are sending", "teacher forcing is 1 # then inputs at test time", "eos_token=\"<eos>\" ) train_data, valid_data, test_data = Multi30k.splits( exts=(\".de\", \".en\"), fields=(german,", "word that the Decoder predicted it to be. 
# Teacher", "(1, N), seq_length # is 1 here because we are", "not a sentence x = x.unsqueeze(0) embedding = self.dropout(self.embedding(x)) #", "loss, global_step=step) step += 1 score = bleu(test_data[1:100], model, german,", "avoid exploding gradient issues, makes sure grads are # within", "range(1, target_len): # Use previous hidden, cell as context from", "predictions = predictions.squeeze(0) return predictions, hidden, cell class Seq2Seq(nn.Module): def", "at training and testing time, if teacher forcing is 1", "output = model(inp_data, target) # Output is of shape (trg_len,", "Forward prop output = model(inp_data, target) # Output is of", "within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1) # Gradient descent step", "return predictions, hidden, cell class Seq2Seq(nn.Module): def __init__(self, encoder, decoder):", "= nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) self.fc = nn.Linear(hidden_size, output_size) def", "want to have # output to be: (N, 10) and", "hidden_size self.num_layers = num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn =", "actual next word # otherwise we take the word that", "input_size_encoder = len(german.vocab) input_size_decoder = len(english.vocab) output_size = len(english.vocab) encoder_embedding_size", "= \"ein boot mit mehreren männern darauf wird von einem", "just gonna remove the first dim predictions = predictions.squeeze(0) return", "# Model hyperparameters load_model = False device = torch.device(\"cuda\" if", "the best word the Decoder predicted (index in the vocabulary)", "outputs ### We're ready to define everything we need for", "# outputs shape: (1, N, hidden_size) predictions = self.fc(outputs) #", "= output.argmax(1) # With probability of teacher_force_ratio we take the", "as nn import torch.optim as optim from torchtext.datasets import Multi30k", "import torch.nn as nn import torch.optim as optim from torchtext.datasets", "input to the Decoder 
which will be <SOS> token x", "= Field( tokenize=tokenize_eng, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\" ) train_data, valid_data, test_data", "to have # output to be: (N, 10) and targets", "have # output to be: (N, 10) and targets just", "token while we're at it output = output[1:].reshape(-1, output.shape[2]) target", "dec_dropout, ).to(device) model = Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate)", "single word and not a sentence x = x.unsqueeze(0) embedding", "self.decoder = decoder def forward(self, source, target, teacher_force_ratio=0.5): batch_size =", "Seq2Seq(encoder_net, decoder_net).to(device) optimizer = optim.Adam(model.parameters(), lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion", "sentence = \"ein boot mit mehreren männern darauf wird von", "inputs at training and testing time, if teacher forcing is", "spacy_eng = spacy.load(\"en\") def tokenize_ger(text): return [tok.text for tok in", "batch_size, output_dim) but Cross Entropy Loss # doesn't take input", "so we're # just gonna remove the first dim predictions", "self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers, dropout=p) def forward(self, x): #", "shape: (1, N, embedding_size) outputs, (hidden, cell) = self.rnn(embedding, (hidden,", "# Output is of shape (trg_len, batch_size, output_dim) but Cross", "we have output_words * batch_size that we want to send", "get to cuda inp_data = batch.src.to(device) target = batch.trg.to(device) #", "{num_epochs}]\") checkpoint = {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence", "if we have MNIST we want to have # output", "<SOS> token x = target[0] for t in range(1, target_len):", "we're # just gonna remove the first dim predictions =", "Training hyperparameters num_epochs = 100 learning_rate = 0.001 batch_size =", "= nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: 
load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"), model, optimizer) sentence = \"ein", "Decoder(nn.Module): def __init__( self, input_size, embedding_size, hidden_size, output_size, num_layers, p", "sure grads are # within a healthy range torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)", "boot mit mehreren männern darauf wird von einem großen pferdegespann", "return [tok.text for tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True,", "= 0.001 batch_size = 64 # Model hyperparameters load_model =", "(trg_len, batch_size, output_dim) but Cross Entropy Loss # doesn't take", "tok in spacy_eng.tokenizer(text)] german = Field(tokenize=tokenize_ger, lower=True, init_token=\"<sos>\", eos_token=\"<eos>\") english", "x = target[0] for t in range(1, target_len): # Use", "Model hyperparameters load_model = False device = torch.device(\"cuda\" if torch.cuda.is_available()", "so that the model gets used to seeing # similar", "cell) = self.rnn(embedding) # outputs shape: (seq_length, N, hidden_size) return", "= Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers, enc_dropout ).to(device) decoder_net =", "where N is batch size embedding = self.dropout(self.embedding(x)) # embedding", "writer.add_scalar(\"Training loss\", loss, global_step=step) step += 1 score = bleu(test_data[1:100],", "# Needs to be the same for both RNN's num_layers", "1 here because we are sending in a single word", "english.build_vocab(train_data, max_size=10000, min_freq=2) class Encoder(nn.Module): def __init__(self, input_size, embedding_size, hidden_size,", "= self.dropout(self.embedding(x)) # embedding shape: (1, N, embedding_size) outputs, (hidden,", "forward(self, x): # x shape: (seq_length, N) where N is", "x shape: (seq_length, N) where N is batch size embedding", "Clip to avoid exploding gradient issues, makes sure grads are", "to be the same for both RNN's num_layers = 2", "probability of teacher_force_ratio we take the 
actual next word #", "spacy_ger.tokenizer(text)] def tokenize_eng(text): return [tok.text for tok in spacy_eng.tokenizer(text)] german", "optim from torchtext.datasets import Multi30k from torchtext.data import Field, BucketIterator", "= target[0] for t in range(1, target_len): # Use previous", "= spacy.load(\"en\") def tokenize_ger(text): return [tok.text for tok in spacy_ger.tokenizer(text)]", "2 enc_dropout = 0.5 dec_dropout = 0.5 # Tensorboard to", "lr=learning_rate) pad_idx = english.vocab.stoi[\"<pad>\"] criterion = nn.CrossEntropyLoss(ignore_index=pad_idx) if load_model: load_checkpoint(torch.load(\"my_checkpoint.pth.tar\"),", "test_data = Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english) ) german.build_vocab(train_data, max_size=10000,", "used to. This was a long comment. x = target[t]", "fields=(german, english) ) german.build_vocab(train_data, max_size=10000, min_freq=2) english.build_vocab(train_data, max_size=10000, min_freq=2) class", "cell = self.decoder(x, hidden, cell) # Store next output prediction", "= nn.Linear(hidden_size, output_size) def forward(self, x, hidden, cell): # x", "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\") input_size_encoder = len(german.vocab)", "von einem großen pferdegespann ans ufer gezogen.\" for epoch in", "# Clip to avoid exploding gradient issues, makes sure grads", "len(x.src), device=device, ) encoder_net = Encoder( input_size_encoder, encoder_embedding_size, hidden_size, num_layers,", "forward(self, x, hidden, cell): # x shape: (N) where N", "forcing is 1 # then inputs at test time might", "{translated_sentence}\") model.train() for batch_idx, batch in enumerate(train_iterator): # Get input", "forward(self, source, target, teacher_force_ratio=0.5): batch_size = source.shape[1] target_len = target.shape[0]", "self.decoder(x, hidden, cell) # Store next output prediction outputs[t] =", "output[1:].reshape(-1, output.shape[2]) target = target[1:].reshape(-1) 
optimizer.zero_grad() loss = criterion(output, target)", "num_layers self.embedding = nn.Embedding(input_size, embedding_size) self.rnn = nn.LSTM(embedding_size, hidden_size, num_layers,", "to the Decoder which will be <SOS> token x =", "it to be (1, N), seq_length # is 1 here", "= {\"state_dict\": model.state_dict(), \"optimizer\": optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence(", "encoder at start output, hidden, cell = self.decoder(x, hidden, cell)", "optimizer.state_dict()} save_checkpoint(checkpoint) model.eval() translated_sentence = translate_sentence( model, sentence, german, english,", "class Decoder(nn.Module): def __init__( self, input_size, embedding_size, hidden_size, output_size, num_layers,", "# embedding shape: (1, N, embedding_size) outputs, (hidden, cell) =", "\\n {translated_sentence}\") model.train() for batch_idx, batch in enumerate(train_iterator): # Get", "best_guess return outputs ### We're ready to define everything we", "train_data, valid_data, test_data = Multi30k.splits( exts=(\".de\", \".en\"), fields=(german, english) )" ]
[ "self.train_step != 0: self.checkpoint([sample, utterances]) return [{\"id\": self.id} for _", "history = self.histories[i] history.append( (self.self_speaker_token if not other else self.other_speaker_token)", "import Dict, Any, List import string from parlai.core.agents import Agent", "class LightImitateMixin(Agent): \"\"\"Abstract class that handles passing expert trajectories alongside", "[ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token)", ") and self.train_step != 0: self.checkpoint([sample, utterances]) return [{\"id\": self.id}", "ones imitate = [] sample = [] for i, observation", "history.append( (self.self_speaker_token if not other else self.other_speaker_token) + utterances[i] )", "import string from parlai.core.agents import Agent from parlai.core.message import Message", "\"\"\"Update weights here\"\"\" pass def _update_histories(self, utterances, other=False): for i", "(self.self_speaker_token if not other else self.other_speaker_token) + utterances[i] ) self.histories[i]", "0 ) and self.train_step != 0: self.checkpoint([sample, utterances]) return [{\"id\":", "\"\"\"Implement sampling utterances and memorization here\"\"\" pass def batch_sample(self, dialogs)", "0 ] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample) if ( self.train_step", "for i, observation in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for", "that handles passing expert trajectories alongside self-play sampling \"\"\" def", "LightImitateMixin(Agent): \"\"\"Abstract class that handles passing expert trajectories alongside self-play", "raise NotImplementedError() def batch_act(self, observations): self.train_step += 1 # Add", "self.id} for _ in observations] def batch_imitate(self, dialogs): \"\"\"Implement sampling", "len(dialog[1]) > 0 ] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample) if", 
"len(dialog[1]) > 0 ] ) imitate.extend( [ dialog for dialog", "to data ones imitate = [] sample = [] for", "self.histories[i] = history def _convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token,", "dialog[1][:-1]) for dialog in observation[\"text\"] if len(dialog[1]) > 0 ]", "self.train_step += 1 # Add generated histories to data ones", "def _convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token", "= history def _convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token)", "= [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn else turn.replace(self.other_speaker_token,", "opt: Dict[str, Any], shared: Dict[str, Any] = None): self.id =", "_update_histories(self, utterances, other=False): for i in range(len(utterances)): history = self.histories[i]", "imitate.extend( [ dialog for dialog in observation[\"text\"] if len(dialog[1]) >", "def act(self): raise NotImplementedError() def batch_act(self, observations): self.train_step += 1", "pass def batch_sample(self, dialogs) -> List[str]: \"\"\"Implement update here\"\"\" pass", "[{\"id\": self.id} for _ in observations] def batch_imitate(self, dialogs): \"\"\"Implement", "sampling utterances and memorization here\"\"\" pass def batch_sample(self, dialogs) ->", "= 0 self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def act(self):", "observations): self.train_step += 1 # Add generated histories to data", "self.id = \"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token", "i, observation in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog", "here\"\"\" pass def batch_sample(self, dialogs) -> List[str]: \"\"\"Implement update here\"\"\"", "0 
self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def act(self): raise", "sample import pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class", "typing import Dict, Any, List import string from parlai.core.agents import", "self.batch_imitate(imitate) utterances = self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump ==", "def batch_imitate(self, dialogs): \"\"\"Implement sampling utterances and memorization here\"\"\" pass", "def batch_act(self, observations): self.train_step += 1 # Add generated histories", "self.episode_num_dialog_dump == 0 ) and self.train_step != 0: self.checkpoint([sample, utterances])", "utterances and memorization here\"\"\" pass def batch_sample(self, dialogs) -> List[str]:", "= self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump == 0 )", "else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in history ] return history", "utterances, other=False): for i in range(len(utterances)): history = self.histories[i] history.append(", "other else self.other_speaker_token) + utterances[i] ) self.histories[i] = history def", "dialog for dialog in observation[\"text\"] if len(dialog[1]) > 0 ]", "expert trajectories alongside self-play sampling \"\"\" def __init__(self, opt: Dict[str,", "here\"\"\" pass def batch_update(self): \"\"\"Update weights here\"\"\" pass def _update_histories(self,", "turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for", "batch_imitate(self, dialogs): \"\"\"Implement sampling utterances and memorization here\"\"\" pass def", "utterances = self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump == 0", "act(self): raise NotImplementedError() def batch_act(self, observations): self.train_step += 1 #", "from 
parlai.core.agents import Agent from parlai.core.message import Message from random", "[] sample = [] for i, observation in enumerate(observations): sample.extend(", "if not other else self.other_speaker_token) + utterances[i] ) self.histories[i] =", "not other else self.other_speaker_token) + utterances[i] ) self.histories[i] = history", "\"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def act(self): raise NotImplementedError() def batch_act(self,", "trajectories alongside self-play sampling \"\"\" def __init__(self, opt: Dict[str, Any],", "self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in history", "\"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\"", "here\"\"\" pass def _update_histories(self, utterances, other=False): for i in range(len(utterances)):", "= [] sample = [] for i, observation in enumerate(observations):", ") self.histories[i] = history def _convert_history_to_other(self, history): history = [", "alongside self-play sampling \"\"\" def __init__(self, opt: Dict[str, Any], shared:", "[] for i, observation in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1])", "import Agent from parlai.core.message import Message from random import sample", "passing expert trajectories alongside self-play sampling \"\"\" def __init__(self, opt:", "batch_sample(self, dialogs) -> List[str]: \"\"\"Implement update here\"\"\" pass def batch_update(self):", "parlai.core.agents import Agent from parlai.core.message import Message from random import", "i in range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token if not", "1 # Add generated histories to data ones imitate =", "!= 0: self.checkpoint([sample, utterances]) return [{\"id\": self.id} for _ in", "List import string from parlai.core.agents import Agent from parlai.core.message import", "for dialog 
in observation[\"text\"] if len(dialog[1]) > 0 ] )", "in observations] def batch_imitate(self, dialogs): \"\"\"Implement sampling utterances and memorization", "range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token if not other else", "dialogs) -> List[str]: \"\"\"Implement update here\"\"\" pass def batch_update(self): \"\"\"Update", "self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def act(self): raise NotImplementedError()", "data ones imitate = [] sample = [] for i,", "batch_update(self): \"\"\"Update weights here\"\"\" pass def _update_histories(self, utterances, other=False): for", "= [] for i, observation in enumerate(observations): sample.extend( [ (dialog[0],", "] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample) if ( self.train_step %", "pass def _update_histories(self, utterances, other=False): for i in range(len(utterances)): history", "self.train_step = 0 self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def", "sampling \"\"\" def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any]", "( self.train_step % self.episode_num_dialog_dump == 0 ) and self.train_step !=", "= pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class that handles passing expert", "path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class that handles passing", "dialogs): \"\"\"Implement sampling utterances and memorization here\"\"\" pass def batch_sample(self,", "dialog in observation[\"text\"] if len(dialog[1]) > 0 ] ) imitate.extend(", "> 0 ] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample) if (", "class that handles passing expert trajectories alongside self-play sampling \"\"\"", "] ) imitate.extend( [ dialog for dialog in observation[\"text\"] if", "for _ in observations] def batch_imitate(self, dialogs): \"\"\"Implement 
sampling utterances", "-> List[str]: \"\"\"Implement update here\"\"\" pass def batch_update(self): \"\"\"Update weights", "history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn else", "Add generated histories to data ones imitate = [] sample", "histories to data ones imitate = [] sample = []", "def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any] = None):", "in observation[\"text\"] if len(dialog[1]) > 0 ] ) imitate.extend( [", "from random import sample import pathlib path = pathlib.Path(__file__).parent.absolute() class", "+= 1 # Add generated histories to data ones imitate", ") imitate.extend( [ dialog for dialog in observation[\"text\"] if len(dialog[1])", "_ in observations] def batch_imitate(self, dialogs): \"\"\"Implement sampling utterances and", "\"<speaker_other>\" def act(self): raise NotImplementedError() def batch_act(self, observations): self.train_step +=", "__init__(self, opt: Dict[str, Any], shared: Dict[str, Any] = None): self.id", "imitate = [] sample = [] for i, observation in", "memorization here\"\"\" pass def batch_sample(self, dialogs) -> List[str]: \"\"\"Implement update", "= None): self.id = \"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token =", "observations] def batch_imitate(self, dialogs): \"\"\"Implement sampling utterances and memorization here\"\"\"", "_convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in", "else self.other_speaker_token) + utterances[i] ) self.histories[i] = history def _convert_history_to_other(self,", "> 0 ] ) imitate.extend( [ dialog for dialog in", "dialog in observation[\"text\"] if len(dialog[1]) > 0 ] ) self.batch_imitate(imitate)", "# Add generated histories to data ones imitate = []", "utterances]) return [{\"id\": self.id} for _ in observations] def batch_imitate(self,", "utterances[i] ) self.histories[i] = history def 
_convert_history_to_other(self, history): history =", "0 ] ) imitate.extend( [ dialog for dialog in observation[\"text\"]", "if self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in", "def batch_sample(self, dialogs) -> List[str]: \"\"\"Implement update here\"\"\" pass def", "(dialog[0], dialog[1][:-1]) for dialog in observation[\"text\"] if len(dialog[1]) > 0", "if len(dialog[1]) > 0 ] ) imitate.extend( [ dialog for", "if ( self.train_step % self.episode_num_dialog_dump == 0 ) and self.train_step", "return [{\"id\": self.id} for _ in observations] def batch_imitate(self, dialogs):", "generated histories to data ones imitate = [] sample =", "string from parlai.core.agents import Agent from parlai.core.message import Message from", "in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog in observation[\"text\"]", "and memorization here\"\"\" pass def batch_sample(self, dialogs) -> List[str]: \"\"\"Implement", "batch_act(self, observations): self.train_step += 1 # Add generated histories to", "= \"<speaker_self>\" self.other_speaker_token = \"<speaker_other>\" def act(self): raise NotImplementedError() def", "other=False): for i in range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token", "in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in history ]", "update here\"\"\" pass def batch_update(self): \"\"\"Update weights here\"\"\" pass def", "NotImplementedError() def batch_act(self, observations): self.train_step += 1 # Add generated", "Dict[str, Any] = None): self.id = \"LightChatbotSelfPlay\" self.train_step = 0", "% self.episode_num_dialog_dump == 0 ) and self.train_step != 0: self.checkpoint([sample,", "pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class that handles", "= self.histories[i] history.append( (self.self_speaker_token if not other else 
self.other_speaker_token) +", "observation[\"text\"] if len(dialog[1]) > 0 ] ) imitate.extend( [ dialog", "0: self.checkpoint([sample, utterances]) return [{\"id\": self.id} for _ in observations]", "observation in enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog in", ") self.batch_imitate(imitate) utterances = self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump", "self.checkpoint([sample, utterances]) return [{\"id\": self.id} for _ in observations] def", "import Message from random import sample import pathlib path =", "Dict, Any, List import string from parlai.core.agents import Agent from", "parlai.core.message import Message from random import sample import pathlib path", "handles passing expert trajectories alongside self-play sampling \"\"\" def __init__(self,", "self.train_step % self.episode_num_dialog_dump == 0 ) and self.train_step != 0:", "self.other_speaker_token) if self.self_speaker_token in turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn", "= \"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token = \"<speaker_self>\" self.other_speaker_token =", "for i in range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token if", "self.other_speaker_token) + utterances[i] ) self.histories[i] = history def _convert_history_to_other(self, history):", "\"\"\" def __init__(self, opt: Dict[str, Any], shared: Dict[str, Any] =", "self-play sampling \"\"\" def __init__(self, opt: Dict[str, Any], shared: Dict[str,", "random import sample import pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent):", "self.histories[i] history.append( (self.self_speaker_token if not other else self.other_speaker_token) + utterances[i]", "weights here\"\"\" pass def _update_histories(self, utterances, other=False): for i in", "None): self.id = \"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token = 
\"<speaker_self>\"", "sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog in observation[\"text\"] if len(dialog[1])", "import pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class that", "= \"<speaker_other>\" def act(self): raise NotImplementedError() def batch_act(self, observations): self.train_step", "== 0 ) and self.train_step != 0: self.checkpoint([sample, utterances]) return", "from parlai.core.message import Message from random import sample import pathlib", "def batch_update(self): \"\"\"Update weights here\"\"\" pass def _update_histories(self, utterances, other=False):", "if len(dialog[1]) > 0 ] ) self.batch_imitate(imitate) utterances = self.batch_sample(sample)", "turn else turn.replace(self.other_speaker_token, self.self_speaker_token) for turn in history ] return", "Agent from parlai.core.message import Message from random import sample import", "self.other_speaker_token = \"<speaker_other>\" def act(self): raise NotImplementedError() def batch_act(self, observations):", "and self.train_step != 0: self.checkpoint([sample, utterances]) return [{\"id\": self.id} for", "[ (dialog[0], dialog[1][:-1]) for dialog in observation[\"text\"] if len(dialog[1]) >", "shared: Dict[str, Any] = None): self.id = \"LightChatbotSelfPlay\" self.train_step =", "+ utterances[i] ) self.histories[i] = history def _convert_history_to_other(self, history): history", "\"\"\"Abstract class that handles passing expert trajectories alongside self-play sampling", "import sample import pathlib path = pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract", "Any, List import string from parlai.core.agents import Agent from parlai.core.message", "def _update_histories(self, utterances, other=False): for i in range(len(utterances)): history =", "self.batch_sample(sample) if ( self.train_step % self.episode_num_dialog_dump == 0 ) and", "Message from random import sample import pathlib path = 
pathlib.Path(__file__).parent.absolute()", "pass def batch_update(self): \"\"\"Update weights here\"\"\" pass def _update_histories(self, utterances,", "history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if self.self_speaker_token in turn", "in observation[\"text\"] if len(dialog[1]) > 0 ] ) self.batch_imitate(imitate) utterances", "in range(len(utterances)): history = self.histories[i] history.append( (self.self_speaker_token if not other", "from typing import Dict, Any, List import string from parlai.core.agents", "Any] = None): self.id = \"LightChatbotSelfPlay\" self.train_step = 0 self.self_speaker_token", "enumerate(observations): sample.extend( [ (dialog[0], dialog[1][:-1]) for dialog in observation[\"text\"] if", "pathlib.Path(__file__).parent.absolute() class LightImitateMixin(Agent): \"\"\"Abstract class that handles passing expert trajectories", "List[str]: \"\"\"Implement update here\"\"\" pass def batch_update(self): \"\"\"Update weights here\"\"\"", "Any], shared: Dict[str, Any] = None): self.id = \"LightChatbotSelfPlay\" self.train_step", "\"\"\"Implement update here\"\"\" pass def batch_update(self): \"\"\"Update weights here\"\"\" pass", "sample = [] for i, observation in enumerate(observations): sample.extend( [", "[ dialog for dialog in observation[\"text\"] if len(dialog[1]) > 0", "observation[\"text\"] if len(dialog[1]) > 0 ] ) self.batch_imitate(imitate) utterances =", "history def _convert_history_to_other(self, history): history = [ turn.replace(self.self_speaker_token, self.other_speaker_token) if", "Dict[str, Any], shared: Dict[str, Any] = None): self.id = \"LightChatbotSelfPlay\"" ]
[ "a singly-linked list of nodes Returns: whether or not the", "head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next", "def has_cycle(head: NodeType) -> bool: \"\"\" Args: head: head of", "-> bool: \"\"\" Args: head: head of a singly-linked list", "- Binary - Bit Manipulation - Blind 75 See Also:", "head.next.next.next.next.next.next = head.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next", "\"\"\" from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, ) def", "else: return False def main(): head = convert_list_to_linked_list([1, 2, 3,", "str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head)))", ">>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next =", "if slow == fast: return True # found the cycle", "singly-linked list of nodes Returns: whether or not the linked", "= convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>>", "True # found the cycle else: return False def main():", "has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True \"\"\"", "True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next", "NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool: \"\"\" Args:", "slow slow = slow.next fast = fast.next.next if slow ==", "import ( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) ->", "nodes Returns: whether or not the linked list has a", "= fast.next.next if slow == fast: return True # found", "cycle else: return False def main(): head = convert_list_to_linked_list([1, 2,", "and fast.next is not 
None: # since fast ≥ slow", "head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head))) main()", "See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import ( ListNode,", "a cycle Examples: >>> has_cycle(None) False >>> head = ListNode(\"self-edge\")", "slow == fast: return True # found the cycle else:", "None: # since fast ≥ slow slow = slow.next fast", "the cycle else: return False def main(): head = convert_list_to_linked_list([1,", "75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import (", "fast: return True # found the cycle else: return False", "= head.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next =", "True \"\"\" slow = fast = head while fast is", "while fast is not None and fast.next is not None:", "- pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list,", "has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>>", "= head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next >>>", "head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6]) print(\"LinkedList has", "has a cycle Examples: >>> has_cycle(None) False >>> head =", "= head.next.next.next >>> has_cycle(head) True \"\"\" slow = fast =", "fast.next.next if slow == fast: return True # found the", "head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True \"\"\" slow = fast", "head >>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head)", "fast.next is not None: # since fast ≥ slow slow", "slow.next fast = fast.next.next if slow == fast: return True", 
"ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool: \"\"\"", "fast = fast.next.next if slow == fast: return True #", "\"\"\" slow = fast = head while fast is not", "of nodes Returns: whether or not the linked list has", "whether or not the linked list has a cycle Examples:", "fast = head while fast is not None and fast.next", "has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList has", "NodeType) -> bool: \"\"\" Args: head: head of a singly-linked", "head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next", "== fast: return True # found the cycle else: return", "not None and fast.next is not None: # since fast", "convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool: \"\"\" Args: head:", "Args: head: head of a singly-linked list of nodes Returns:", "has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has", "pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, )", "main(): head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6]) print(\"LinkedList", "True >>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True \"\"\" slow", "has_cycle(None) False >>> head = ListNode(\"self-edge\") >>> head.next = head", "head.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next", "of a singly-linked list of nodes Returns: whether or not", "= head >>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>>", "# since fast ≥ slow slow = slow.next fast =", "convert_list_to_linked_list([1, 2, 3, 4, 5, 6]) print(\"LinkedList has cycle: \"", "head.next.next >>> has_cycle(head) True >>> 
head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head)", "str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList has cycle: \" + str(has_cycle(head)))", "list of nodes Returns: whether or not the linked list", "Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import ( ListNode, NodeType,", "slow = fast = head while fast is not None", "head = ListNode(\"self-edge\") >>> head.next = head >>> has_cycle(head) True", "fast is not None and fast.next is not None: #", "found the cycle else: return False def main(): head =", "<reponame>TeoZosa/pytudes \"\"\"https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D Categories: - Binary - Bit Manipulation - Blind", "Manipulation - Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from", "Returns: whether or not the linked list has a cycle", ">>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True \"\"\" slow =", "ListNode(\"self-edge\") >>> head.next = head >>> has_cycle(head) True >>> head", "2, 3, 4, 5, 6]) print(\"LinkedList has cycle: \" +", "head of a singly-linked list of nodes Returns: whether or", "False def main(): head = convert_list_to_linked_list([1, 2, 3, 4, 5,", "return True # found the cycle else: return False def", "False >>> head = ListNode(\"self-edge\") >>> head.next = head >>>", "= ListNode(\"self-edge\") >>> head.next = head >>> has_cycle(head) True >>>", ">>> head.next = head >>> has_cycle(head) True >>> head =", "slow = slow.next fast = fast.next.next if slow == fast:", "is not None and fast.next is not None: # since", "False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True >>> head.next.next.next.next.next.next", ">>> has_cycle(head) True \"\"\" slow = fast = head while", "# found the cycle else: return False def main(): head", 
"convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head)", "def main(): head = convert_list_to_linked_list([1, 2, 3, 4, 5, 6])", "+ str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList has cycle: \" +", "= slow.next fast = fast.next.next if slow == fast: return", "head.next = head >>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6])", "\"\"\"https://www.educative.io/courses/grokking-the-coding-interview/N7rwVyAZl6D Categories: - Binary - Bit Manipulation - Blind 75", "+ str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has cycle: \" +", "≥ slow slow = slow.next fast = fast.next.next if slow", "= convert_list_to_linked_list([1, 2, 3, 4, 5, 6]) print(\"LinkedList has cycle:", ">>> has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False", ">>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>> head.next.next.next.next.next.next =", "Examples: >>> has_cycle(None) False >>> head = ListNode(\"self-edge\") >>> head.next", "linked list has a cycle Examples: >>> has_cycle(None) False >>>", "( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType) -> bool:", "head.next.next.next >>> has_cycle(head) True \"\"\" slow = fast = head", "Binary - Bit Manipulation - Blind 75 See Also: -", ") def has_cycle(head: NodeType) -> bool: \"\"\" Args: head: head", "bool: \"\"\" Args: head: head of a singly-linked list of", "cycle Examples: >>> has_cycle(None) False >>> head = ListNode(\"self-edge\") >>>", "print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList", "has_cycle(head) True >>> head = convert_list_to_linked_list([1,2,3,4,5,6]) >>> has_cycle(head) False >>>", "None and fast.next is not None: # since fast ≥", 
"list has a cycle Examples: >>> has_cycle(None) False >>> head", "return False def main(): head = convert_list_to_linked_list([1, 2, 3, 4,", "has_cycle(head: NodeType) -> bool: \"\"\" Args: head: head of a", "4, 5, 6]) print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next", "has_cycle(head) True \"\"\" slow = fast = head while fast", "- Bit Manipulation - Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py", ">>> has_cycle(None) False >>> head = ListNode(\"self-edge\") >>> head.next =", "cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList has cycle:", ">>> has_cycle(head) False >>> head.next.next.next.next.next.next = head.next.next >>> has_cycle(head) True", "6]) print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next", "3, 4, 5, 6]) print(\"LinkedList has cycle: \" + str(has_cycle(head)))", "Categories: - Binary - Bit Manipulation - Blind 75 See", "pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head: NodeType)", "head: head of a singly-linked list of nodes Returns: whether", "head while fast is not None and fast.next is not", "Bit Manipulation - Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\"", "or not the linked list has a cycle Examples: >>>", "from pytudes._2021.utils.linked_list import ( ListNode, NodeType, convert_list_to_linked_list, ) def has_cycle(head:", "not None: # since fast ≥ slow slow = slow.next", "the linked list has a cycle Examples: >>> has_cycle(None) False", "cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has cycle:", "is not None: # since fast ≥ slow slow =", "not the linked list has a cycle Examples: >>> has_cycle(None)", "Blind 75 See Also: - 
pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list import", "= head while fast is not None and fast.next is", "\" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList has cycle: \"", "print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next print(\"LinkedList", ">>> has_cycle(head) True >>> head.next.next.next.next.next.next = head.next.next.next >>> has_cycle(head) True", ">>> head = ListNode(\"self-edge\") >>> head.next = head >>> has_cycle(head)", "since fast ≥ slow slow = slow.next fast = fast.next.next", "fast ≥ slow slow = slow.next fast = fast.next.next if", "5, 6]) print(\"LinkedList has cycle: \" + str(has_cycle(head))) head.next.next.next.next.next.next =", "= fast = head while fast is not None and", "\" + str(has_cycle(head))) head.next.next.next.next.next.next = head.next.next.next print(\"LinkedList has cycle: \"", "\"\"\" Args: head: head of a singly-linked list of nodes", "- Blind 75 See Also: - pytudes/_2021/leetcode/blind_75/linked_list/_141__linked_list_cycle__easy.py \"\"\" from pytudes._2021.utils.linked_list" ]
[ "input_doc_root) # Add code to start your server here threads", "= server.res_close.encode() # conn.sendall(msg) break if __name__ == '__main__': input_port", "= port self.doc_root = doc_root self.host = '127.0.0.1' self.res_200 =", "0: # path escape from file root info['url'] = '404escape'", "< 2: x.append('index.html') for d in range(len(x)): path = path", "x[i] == '..': x.remove(x[i]) x.remove(x[i - 1]) i -= 1", "server.port)) s.listen() while True: conn, addr = s.accept() t =", "send finished\") # msg = server.res_close.encode() # conn.sendall(msg) break if", "= {'act': act, 'url': url, 'version': version} for h in", "server here threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:", "\"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as f: data = f.read()", "'url': url, 'version': version} for h in headers: h =", "= '127.0.0.1' self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404", "= firstline.split() except ValueError: info = {'url': '400malform'} return info", "'' x = url.split('/') i = 0 while i <", "True: req = conn.recv(1024).decode() if not req: break info =", "reqinfo['url'] # 404 escape if path == '404escape': return self.res_404", "info info = {'act': act, 'url': url, 'version': version} for", "# path escape from file root info['url'] = '404escape' return", "break if __name__ == '__main__': input_port = int(sys.argv[1]) input_doc_root =", "+ '/' + x[d] info['url'] = os.path.realpath(self.doc_root + path) return", "!= '\\r\\n\\r\\n': info = {'url': '400malform'} return info headers =", "server = MyServer(input_port, input_doc_root) # Add code to start your", "threading class MyServer: def __init__(self, port, doc_root): self.port = port", "f.read() res += \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html': res", "') if len(h) < 2: continue field = h[0] value", "while i < len(x): if '' in x: x.remove('') if", 
"server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send finished\") # msg = server.res_close.encode()", "== '__main__': input_port = int(sys.argv[1]) input_doc_root = sys.argv[2] server =", "MyServer(input_port, input_doc_root) # Add code to start your server here", "headers = request.splitlines() firstline = headers.pop(0) try: (act, url, version)", "h.split(': ') if len(h) < 2: continue field = h[0]", "x[d] info['url'] = os.path.realpath(self.doc_root + path) return info # generate", "Add code to start your server here threads = []", "return self.res_404 # a valid 200 req else: res =", "# 404 escape if path == '404escape': return self.res_404 #", "info headers = request.splitlines() firstline = headers.pop(0) try: (act, url,", "import socket import time import threading class MyServer: def __init__(self,", "'__main__': input_port = int(sys.argv[1]) input_doc_root = sys.argv[2] server = MyServer(input_port,", "request into dict def req_info(self, request): # 400 malform if", "url, version) = firstline.split() except ValueError: info = {'url': '400malform'}", "= h[0] value = h[1] info[field] = value # mapping", "i < len(x): if '' in x: x.remove('') if i", "404 escape if path == '404escape': return self.res_404 # 400", "\"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" # map request into dict def", "Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" #", "h in headers: h = h.split(': ') if len(h) <", "firstline.split() except ValueError: info = {'url': '400malform'} return info info", "escape from file root info['url'] = '404escape' return info if", "# msg = server.res_close.encode() # conn.sendall(msg) break if __name__ ==", "x.remove('') if i < 0 or x[0] == '..' 
or", "with conn: try: conn.settimeout(5) except socket.timeout: conn.close() # print('closed') #", "code to start your server here threads = [] with", "'' in x: x.remove('') if i < 0 or x[0]", "port, doc_root): self.port = port self.doc_root = doc_root self.host =", "info # generate response def res_gen(self, reqinfo): path = reqinfo['url']", "- 1]) i -= 1 else: i += 1 #", "'Content-Type: image/png\\r\\n\\r\\n' else: res += 'Content-Type: image/jpeg\\r\\n\\r\\n' res = res", "i < 0 or x[0] == '..' or len(x) ==", "0 or x[0] == '..' or len(x) == 0: #", "= f.read() res += \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html':", "png if path.split('.')[-1] == 'png': res += 'Content-Type: image/png\\r\\n\\r\\n' else:", "addr) while True: req = conn.recv(1024).decode() if not req: break", "OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver", "+ path) return info # generate response def res_gen(self, reqinfo):", "= h[1] info[field] = value # mapping url, return 404", "else: i += 1 # map index.html if len(x[-1].split('.')) <", "== '..' 
or len(x) == 0: # path escape from", "break info = server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) # print(\"msg", "firstline = headers.pop(0) try: (act, url, version) = firstline.split() except", "= \"HTTP/1.1 200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404", "addr), args=(conn, addr)) t.start() threads.append(t) for t in threads: t.join()", "= value # mapping url, return 404 escape or absolute", "'png': res += 'Content-Type: image/png\\r\\n\\r\\n' else: res += 'Content-Type: image/jpeg\\r\\n\\r\\n'", "= doc_root self.host = '127.0.0.1' self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer:", "Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1 400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\"", "# mapping url, return 404 escape or absolute filename #", "s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t) for", "if __name__ == '__main__': input_port = int(sys.argv[1]) input_doc_root = sys.argv[2]", "res = res + str(data) return res def createsocket(conn, addr):", "res + str(data, 'utf-8') else: # for jpg and png", "404 escape or absolute filename # judge whether escape path", "self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as f:", "threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t) for t in threads:", "here threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host,", "path = reqinfo['url'] # 404 escape if path == '404escape':", "sys.argv[2] server = MyServer(input_port, input_doc_root) # Add code to start", "print(\"msg send finished\") # msg = server.res_close.encode() # conn.sendall(msg) break", "return res def createsocket(conn, addr): with conn: try: conn.settimeout(5) except", "'400malform'} return info headers = request.splitlines() firstline = headers.pop(0) try:", "1.0\\r\\n\\r\\n\" self.res_400 = 
\"HTTP/1.1 400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close", "info = {'url': '400malform'} return info info = {'act': act,", "res_gen(self, reqinfo): path = reqinfo['url'] # 404 escape if path", "request.splitlines() firstline = headers.pop(0) try: (act, url, version) = firstline.split()", "in range(len(x)): path = path + '/' + x[d] info['url']", "1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" # map request", "self.res_404 # a valid 200 req else: res = self.res_200", "if '' in x: x.remove('') if i < 0 or", "ValueError: info = {'url': '400malform'} return info info = {'act':", "conn.close() # print('closed') # print('Connected by', addr) while True: req", "as s: s.bind((server.host, server.port)) s.listen() while True: conn, addr =", "= \"HTTP/1.1 400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1", "= os.path.realpath(self.doc_root + path) return info # generate response def", "res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as f: data", "server.res_close.encode() # conn.sendall(msg) break if __name__ == '__main__': input_port =", "conn, addr = s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr))", "sys import os import socket import time import threading class", "value = h[1] info[field] = value # mapping url, return", "response def res_gen(self, reqinfo): path = reqinfo['url'] # 404 escape", "image/png\\r\\n\\r\\n' else: res += 'Content-Type: image/jpeg\\r\\n\\r\\n' res = res +", "os import socket import time import threading class MyServer: def", "path == \"400malform\": return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except", "d in range(len(x)): path = path + '/' + x[d]", "res += \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html': res +=", "createsocket(conn, addr): with conn: try: conn.settimeout(5) except 
socket.timeout: conn.close() #", "time import threading class MyServer: def __init__(self, port, doc_root): self.port", "malform req if path == \"400malform\": return self.res_400 try: reqinfo['Host']", "'..': x.remove(x[i]) x.remove(x[i - 1]) i -= 1 else: i", "'404escape' return info if i < len(x) and x[i] ==", "return info headers = request.splitlines() firstline = headers.pop(0) try: (act,", "\"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1 400", "found if not os.path.isfile(path): return self.res_404 # a valid 200", "'utf-8') else: # for jpg and png if path.split('.')[-1] ==", "len(x): if '' in x: x.remove('') if i < 0", "str(data, 'utf-8') else: # for jpg and png if path.split('.')[-1]", "for h in headers: h = h.split(': ') if len(h)", "and png if path.split('.')[-1] == 'png': res += 'Content-Type: image/png\\r\\n\\r\\n'", "server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send finished\") #", "else: res = self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path,", "your server here threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as", "malform if request[-4:] != '\\r\\n\\r\\n': info = {'url': '400malform'} return", "404 not found if not os.path.isfile(path): return self.res_404 # a", "400 malform req if path == \"400malform\": return self.res_400 try:", "-= 1 else: i += 1 # map index.html if", "url, 'version': version} for h in headers: h = h.split(':", "not req: break info = server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg)", "self.res_400 = \"HTTP/1.1 400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close =", "info[field] = value # mapping url, return 404 escape or", "= s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t)", "self.host = '127.0.0.1' self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer: Myserver 
1.0\\r\\n\"", "import time import threading class MyServer: def __init__(self, port, doc_root):", "res = res + str(data, 'utf-8') else: # for jpg", "port self.doc_root = doc_root self.host = '127.0.0.1' self.res_200 = \"HTTP/1.1", "in headers: h = h.split(': ') if len(h) < 2:", "res += 'Content-Type: image/jpeg\\r\\n\\r\\n' res = res + str(data) return", "'version': version} for h in headers: h = h.split(': ')", "text/html\\r\\n\\r\\n' res = res + str(data, 'utf-8') else: # for", "def res_gen(self, reqinfo): path = reqinfo['url'] # 404 escape if", "# 404 not found if not os.path.isfile(path): return self.res_404 #", "0 while i < len(x): if '' in x: x.remove('')", "with open(path, \"rb\") as f: data = f.read() res +=", "== '..': x.remove(x[i]) x.remove(x[i - 1]) i -= 1 else:", "print('Connected by', addr) while True: req = conn.recv(1024).decode() if not", "Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" # map request into dict def req_info(self,", "f: data = f.read() res += \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1]", "+ str(data, 'utf-8') else: # for jpg and png if", "try: conn.settimeout(5) except socket.timeout: conn.close() # print('closed') # print('Connected by',", "= threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t) for t in", "< len(x) and x[i] == '..': x.remove(x[i]) x.remove(x[i - 1])", "<filename>httpd.py import sys import os import socket import time import", "escape if path == '404escape': return self.res_404 # 400 malform", "'127.0.0.1' self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 =", "= 0 while i < len(x): if '' in x:", "escape path = '' x = url.split('/') i = 0", "1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400", "if path.split('.')[-1] == 'html': res += 'Content-Type: text/html\\r\\n\\r\\n' res =", "info['url'] = '404escape' return info if i < len(x) and", "res + str(data) 
return res def createsocket(conn, addr): with conn:", "+ x[d] info['url'] = os.path.realpath(self.doc_root + path) return info #", "\"HTTP/1.1 400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer:", "if i < len(x) and x[i] == '..': x.remove(x[i]) x.remove(x[i", "200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer:", "s.bind((server.host, server.port)) s.listen() while True: conn, addr = s.accept() t", "if not req: break info = server.req_info(req) msg = server.res_gen(info).encode()", "== 'html': res += 'Content-Type: text/html\\r\\n\\r\\n' res = res +", "req else: res = self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with", "res = self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\")", "= {'url': '400malform'} return info headers = request.splitlines() firstline =", "= path + '/' + x[d] info['url'] = os.path.realpath(self.doc_root +", "path) return info # generate response def res_gen(self, reqinfo): path", "map request into dict def req_info(self, request): # 400 malform", "\"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html': res += 'Content-Type: text/html\\r\\n\\r\\n'", "file root info['url'] = '404escape' return info if i <", "2: x.append('index.html') for d in range(len(x)): path = path +", "__name__ == '__main__': input_port = int(sys.argv[1]) input_doc_root = sys.argv[2] server", "if request[-4:] != '\\r\\n\\r\\n': info = {'url': '400malform'} return info", "doc_root self.host = '127.0.0.1' self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer: Myserver", "# print('Connected by', addr) while True: req = conn.recv(1024).decode() if", "path.split('.')[-1] == 'png': res += 'Content-Type: image/png\\r\\n\\r\\n' else: res +=", "x[0] == '..' 
or len(x) == 0: # path escape", "path.split('.')[-1] == 'html': res += 'Content-Type: text/html\\r\\n\\r\\n' res = res", "Myserver 1.0\\r\\n\\r\\n\" # map request into dict def req_info(self, request):", "res def createsocket(conn, addr): with conn: try: conn.settimeout(5) except socket.timeout:", "len(x) and x[i] == '..': x.remove(x[i]) x.remove(x[i - 1]) i", "req = conn.recv(1024).decode() if not req: break info = server.req_info(req)", "as f: data = f.read() res += \"Content-Length: {}\\r\\n\".format(len(data)) if", "1 # map index.html if len(x[-1].split('.')) < 2: x.append('index.html') for", "= server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send finished\")", "in x: x.remove('') if i < 0 or x[0] ==", "input_doc_root = sys.argv[2] server = MyServer(input_port, input_doc_root) # Add code", "h[0] value = h[1] info[field] = value # mapping url,", "MyServer: def __init__(self, port, doc_root): self.port = port self.doc_root =", "conn.recv(1024).decode() if not req: break info = server.req_info(req) msg =", "reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return self.res_400 # 404 not", "h[1] info[field] = value # mapping url, return 404 escape", "if path.split('.')[-1] == 'png': res += 'Content-Type: image/png\\r\\n\\r\\n' else: res", "< 2: continue field = h[0] value = h[1] info[field]", "# judge whether escape path = '' x = url.split('/')", "root info['url'] = '404escape' return info if i < len(x)", "== '404escape': return self.res_404 # 400 malform req if path", "'Content-Type: text/html\\r\\n\\r\\n' res = res + str(data, 'utf-8') else: #", "'html': res += 'Content-Type: text/html\\r\\n\\r\\n' res = res + str(data,", "with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen() while True:", "= \"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1", "i < len(x) and x[i] == '..': x.remove(x[i]) x.remove(x[i -", "# 400 malform req 
if path == \"400malform\": return self.res_400", "to start your server here threads = [] with socket.socket(socket.AF_INET,", "else: res += 'Content-Type: image/jpeg\\r\\n\\r\\n' res = res + str(data)", "str(data) return res def createsocket(conn, addr): with conn: try: conn.settimeout(5)", "\"400malform\": return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return", "info if i < len(x) and x[i] == '..': x.remove(x[i])", "'400malform'} return info info = {'act': act, 'url': url, 'version':", "try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return self.res_400 # 404", "< 0 or x[0] == '..' or len(x) == 0:", "x = url.split('/') i = 0 while i < len(x):", "len(x[-1].split('.')) < 2: x.append('index.html') for d in range(len(x)): path =", "socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen() while True: conn, addr", "Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\"", "or x[0] == '..' 
or len(x) == 0: # path", "400 Client Error\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver", "== 'png': res += 'Content-Type: image/png\\r\\n\\r\\n' else: res += 'Content-Type:", "continue field = h[0] value = h[1] info[field] = value", "dict def req_info(self, request): # 400 malform if request[-4:] !=", "path = path + '/' + x[d] info['url'] = os.path.realpath(self.doc_root", "404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1 400 Client", "len(h) < 2: continue field = h[0] value = h[1]", "# generate response def res_gen(self, reqinfo): path = reqinfo['url'] #", "msg = server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send finished\") # msg", "{}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html': res += 'Content-Type: text/html\\r\\n\\r\\n' res", "+= 'Content-Type: image/png\\r\\n\\r\\n' else: res += 'Content-Type: image/jpeg\\r\\n\\r\\n' res =", "== \"400malform\": return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError:", "input_port = int(sys.argv[1]) input_doc_root = sys.argv[2] server = MyServer(input_port, input_doc_root)", "= [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen()", "self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" # map request into", "for jpg and png if path.split('.')[-1] == 'png': res +=", "data = f.read() res += \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] ==", "request): # 400 malform if request[-4:] != '\\r\\n\\r\\n': info =", "{'url': '400malform'} return info headers = request.splitlines() firstline = headers.pop(0)", "jpg and png if path.split('.')[-1] == 'png': res += 'Content-Type:", "addr): with conn: try: conn.settimeout(5) except socket.timeout: conn.close() # print('closed')", "Myserver 1.0\\r\\n\\r\\n\" self.res_close = \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 
1.0\\r\\n\\r\\n\" # map", "or absolute filename # judge whether escape path = ''", "conn.settimeout(5) except socket.timeout: conn.close() # print('closed') # print('Connected by', addr)", "version) = firstline.split() except ValueError: info = {'url': '400malform'} return", "x.remove(x[i]) x.remove(x[i - 1]) i -= 1 else: i +=", "[] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen() while", "generate response def res_gen(self, reqinfo): path = reqinfo['url'] # 404", "req_info(self, request): # 400 malform if request[-4:] != '\\r\\n\\r\\n': info", "req if path == \"400malform\": return self.res_400 try: reqinfo['Host'] and", "2: continue field = h[0] value = h[1] info[field] =", "self.doc_root = doc_root self.host = '127.0.0.1' self.res_200 = \"HTTP/1.1 200", "socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port)) s.listen() while True: conn,", "not found if not os.path.isfile(path): return self.res_404 # a valid", "import sys import os import socket import time import threading", "def createsocket(conn, addr): with conn: try: conn.settimeout(5) except socket.timeout: conn.close()", "request[-4:] != '\\r\\n\\r\\n': info = {'url': '400malform'} return info headers", "'Content-Type: image/jpeg\\r\\n\\r\\n' res = res + str(data) return res def", "# conn.sendall(msg) break if __name__ == '__main__': input_port = int(sys.argv[1])", "headers: h = h.split(': ') if len(h) < 2: continue", "= reqinfo['url'] # 404 escape if path == '404escape': return", "# map index.html if len(x[-1].split('.')) < 2: x.append('index.html') for d", "by', addr) while True: req = conn.recv(1024).decode() if not req:", "self.res_200 = \"HTTP/1.1 200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1", "t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start() threads.append(t) for t", "res += 'Content-Type: image/png\\r\\n\\r\\n' else: res += 'Content-Type: 
image/jpeg\\r\\n\\r\\n' res", "path + '/' + x[d] info['url'] = os.path.realpath(self.doc_root + path)", "return info if i < len(x) and x[i] == '..':", "200 req else: res = self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime))", "= request.splitlines() firstline = headers.pop(0) try: (act, url, version) =", "map index.html if len(x[-1].split('.')) < 2: x.append('index.html') for d in", "'404escape': return self.res_404 # 400 malform req if path ==", "def req_info(self, request): # 400 malform if request[-4:] != '\\r\\n\\r\\n':", "return info info = {'act': act, 'url': url, 'version': version}", "mapping url, return 404 escape or absolute filename # judge", "# for jpg and png if path.split('.')[-1] == 'png': res", "< len(x): if '' in x: x.remove('') if i <", "= server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send finished\") # msg =", "x: x.remove('') if i < 0 or x[0] == '..'", "return self.res_404 # 400 malform req if path == \"400malform\":", "try: (act, url, version) = firstline.split() except ValueError: info =", "self.res_404 # 400 malform req if path == \"400malform\": return", "headers.pop(0) try: (act, url, version) = firstline.split() except ValueError: info", "escape or absolute filename # judge whether escape path =", "1.0\\r\\n\\r\\n\" # map request into dict def req_info(self, request): #", "if not os.path.isfile(path): return self.res_404 # a valid 200 req", "act, 'url': url, 'version': version} for h in headers: h", "field = h[0] value = h[1] info[field] = value #", "or len(x) == 0: # path escape from file root", "finished\") # msg = server.res_close.encode() # conn.sendall(msg) break if __name__", "= headers.pop(0) try: (act, url, version) = firstline.split() except ValueError:", "len(x) == 0: # path escape from file root info['url']", "== 0: # path escape from file root info['url'] =", "and x[i] == '..': x.remove(x[i]) x.remove(x[i - 1]) i -=", "not os.path.isfile(path): return self.res_404 # a valid 
200 req else:", "conn.sendall(msg) break if __name__ == '__main__': input_port = int(sys.argv[1]) input_doc_root", "= h.split(': ') if len(h) < 2: continue field =", "open(path, \"rb\") as f: data = f.read() res += \"Content-Length:", "+= \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as f: data =", "return 404 escape or absolute filename # judge whether escape", "i -= 1 else: i += 1 # map index.html", "= MyServer(input_port, input_doc_root) # Add code to start your server", "\"rb\") as f: data = f.read() res += \"Content-Length: {}\\r\\n\".format(len(data))", "threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s: s.bind((server.host, server.port))", "\"HTTP/1.1 200 OK\\r\\nServer: Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404 NOT", "return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return self.res_400", "+= 'Content-Type: image/jpeg\\r\\n\\r\\n' res = res + str(data) return res", "value # mapping url, return 404 escape or absolute filename", "return self.res_400 # 404 not found if not os.path.isfile(path): return", "path == '404escape': return self.res_404 # 400 malform req if", "# 400 malform if request[-4:] != '\\r\\n\\r\\n': info = {'url':", "judge whether escape path = '' x = url.split('/') i", "reqinfo): path = reqinfo['url'] # 404 escape if path ==", "while True: conn, addr = s.accept() t = threading.Thread(target=createsocket(conn, addr),", "msg = server.res_close.encode() # conn.sendall(msg) break if __name__ == '__main__':", "x.append('index.html') for d in range(len(x)): path = path + '/'", "= res + str(data, 'utf-8') else: # for jpg and", "i += 1 # map index.html if len(x[-1].split('.')) < 2:", "version} for h in headers: h = h.split(': ') if", "socket.timeout: conn.close() # print('closed') # print('Connected by', addr) while True:", "= conn.recv(1024).decode() if not req: break info = server.req_info(req) msg", "if i < 0 or x[0] == '..' 
or len(x)", "# print('closed') # print('Connected by', addr) while True: req =", "for d in range(len(x)): path = path + '/' +", "reqinfo['User-Agent'] except KeyError: return self.res_400 # 404 not found if", "'/' + x[d] info['url'] = os.path.realpath(self.doc_root + path) return info", "and reqinfo['User-Agent'] except KeyError: return self.res_400 # 404 not found", "int(sys.argv[1]) input_doc_root = sys.argv[2] server = MyServer(input_port, input_doc_root) # Add", "print('closed') # print('Connected by', addr) while True: req = conn.recv(1024).decode()", "def __init__(self, port, doc_root): self.port = port self.doc_root = doc_root", "= \"HTTP/1.1 Connection:close\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" # map request into dict", "req: break info = server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) #", "'..' or len(x) == 0: # path escape from file", "valid 200 req else: res = self.res_200 res += \"Last-Modified:", "= url.split('/') i = 0 while i < len(x): if", "self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent'] except KeyError: return self.res_400 #", "= int(sys.argv[1]) input_doc_root = sys.argv[2] server = MyServer(input_port, input_doc_root) #", "__init__(self, port, doc_root): self.port = port self.doc_root = doc_root self.host", "url.split('/') i = 0 while i < len(x): if ''", "except KeyError: return self.res_400 # 404 not found if not", "= {'url': '400malform'} return info info = {'act': act, 'url':", "1 else: i += 1 # map index.html if len(x[-1].split('.'))", "if path == '404escape': return self.res_404 # 400 malform req", "a valid 200 req else: res = self.res_200 res +=", "doc_root): self.port = port self.doc_root = doc_root self.host = '127.0.0.1'", "x.remove(x[i - 1]) i -= 1 else: i += 1", "if len(h) < 2: continue field = h[0] value =", "= '' x = url.split('/') i = 0 while i", "whether escape path = '' x = url.split('/') i =", "1]) i -= 1 else: i += 1 # map", "= res + str(data) return res def createsocket(conn, addr): 
with", "s: s.bind((server.host, server.port)) s.listen() while True: conn, addr = s.accept()", "(act, url, version) = firstline.split() except ValueError: info = {'url':", "range(len(x)): path = path + '/' + x[d] info['url'] =", "True: conn, addr = s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn,", "class MyServer: def __init__(self, port, doc_root): self.port = port self.doc_root", "into dict def req_info(self, request): # 400 malform if request[-4:]", "{'act': act, 'url': url, 'version': version} for h in headers:", "# a valid 200 req else: res = self.res_200 res", "NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1 400 Client Error\\r\\nServer:", "s.listen() while True: conn, addr = s.accept() t = threading.Thread(target=createsocket(conn,", "= sys.argv[2] server = MyServer(input_port, input_doc_root) # Add code to", "+= \"Content-Length: {}\\r\\n\".format(len(data)) if path.split('.')[-1] == 'html': res += 'Content-Type:", "self.res_400 # 404 not found if not os.path.isfile(path): return self.res_404", "{}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as f: data = f.read() res", "h = h.split(': ') if len(h) < 2: continue field", "# Add code to start your server here threads =", "import os import socket import time import threading class MyServer:", "filename # judge whether escape path = '' x =", "addr = s.accept() t = threading.Thread(target=createsocket(conn, addr), args=(conn, addr)) t.start()", "info['url'] = os.path.realpath(self.doc_root + path) return info # generate response", "conn: try: conn.settimeout(5) except socket.timeout: conn.close() # print('closed') # print('Connected", "start your server here threads = [] with socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "path escape from file root info['url'] = '404escape' return info", "os.path.isfile(path): return self.res_404 # a valid 200 req else: res", "except socket.timeout: conn.close() # print('closed') # 
print('Connected by', addr) while", "import threading class MyServer: def __init__(self, port, doc_root): self.port =", "from file root info['url'] = '404escape' return info if i", "self.port = port self.doc_root = doc_root self.host = '127.0.0.1' self.res_200", "url, return 404 escape or absolute filename # judge whether", "except ValueError: info = {'url': '400malform'} return info info =", "= '404escape' return info if i < len(x) and x[i]", "+= 'Content-Type: text/html\\r\\n\\r\\n' res = res + str(data, 'utf-8') else:", "FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 = \"HTTP/1.1 400 Client Error\\r\\nServer: Myserver", "self.res_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\" self.res_400 =", "KeyError: return self.res_400 # 404 not found if not os.path.isfile(path):", "os.path.realpath(self.doc_root + path) return info # generate response def res_gen(self,", "i = 0 while i < len(x): if '' in", "while True: req = conn.recv(1024).decode() if not req: break info", "conn.sendall(msg) # print(\"msg send finished\") # msg = server.res_close.encode() #", "info = {'url': '400malform'} return info headers = request.splitlines() firstline", "# map request into dict def req_info(self, request): # 400", "= self.res_200 res += \"Last-Modified: {}\\r\\n\".format(time.ctime(os.stat(path).st_mtime)) with open(path, \"rb\") as", "# print(\"msg send finished\") # msg = server.res_close.encode() # conn.sendall(msg)", "400 malform if request[-4:] != '\\r\\n\\r\\n': info = {'url': '400malform'}", "+= 1 # map index.html if len(x[-1].split('.')) < 2: x.append('index.html')", "{'url': '400malform'} return info info = {'act': act, 'url': url,", "else: # for jpg and png if path.split('.')[-1] == 'png':", "path = '' x = url.split('/') i = 0 while", "index.html if len(x[-1].split('.')) < 2: x.append('index.html') for d in range(len(x)):", "info = server.req_info(req) msg = server.res_gen(info).encode() conn.sendall(msg) # print(\"msg send", "return info 
# generate response def res_gen(self, reqinfo): path =", "image/jpeg\\r\\n\\r\\n' res = res + str(data) return res def createsocket(conn,", "socket import time import threading class MyServer: def __init__(self, port,", "res += 'Content-Type: text/html\\r\\n\\r\\n' res = res + str(data, 'utf-8')", "'\\r\\n\\r\\n': info = {'url': '400malform'} return info headers = request.splitlines()", "absolute filename # judge whether escape path = '' x", "if path == \"400malform\": return self.res_400 try: reqinfo['Host'] and reqinfo['User-Agent']", "Myserver 1.0\\r\\n\" self.res_404 = \"HTTP/1.1 404 NOT FOUND\\r\\nServer: Myserver 1.0\\r\\n\\r\\n\"", "if len(x[-1].split('.')) < 2: x.append('index.html') for d in range(len(x)): path", "+ str(data) return res def createsocket(conn, addr): with conn: try:", "info = {'act': act, 'url': url, 'version': version} for h" ]
[ "https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass def add(self): pass def value(self):", "\"\"\" def reset(self): pass def add(self): pass def value(self): pass", "class Metric(object): \"\"\"Base class for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\"", "<gh_stars>0 class Metric(object): \"\"\"Base class for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py", "for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass def", "class for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass", "Metric(object): \"\"\"Base class for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def", "all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass def add(self):", "metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass def add(self): pass", "From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self): pass def add(self): pass def", "\"\"\"Base class for all metrics. From: https://github.com/pytorch/tnt/blob/master/torchnet/meter/meter.py \"\"\" def reset(self):" ]
[ "< 0: last_suffix_index = len(url) if last_slash_index < 0 or", "project: \" + repo_name + \", Branch: \" + branch)", "Branch: \" + branch) Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name +", "get_repo_name_from_url(self, url: str): if not url: return None last_slash_index =", "or last_suffix_index <= last_slash_index: raise Exception(\"Invalid repo url {}\".format(url)) return", "repo\") if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" + repo_name +", "Exception(\"Invalid repo url {}\".format(url)) return url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self,", "clone_or_pull_project(self, path, url, branch): repo_name = self.get_repo_name_from_url(url) if not repo_name:", "if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" + repo_name + \",", "url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self, path, url, branch): repo_name =", "len(url) if last_slash_index < 0 or last_suffix_index <= last_slash_index: raise", "pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self, url: str): if", "< 0 or last_suffix_index <= last_slash_index: raise Exception(\"Invalid repo url", "Taking pull...\") repo = Repo(path) repo.git.checkout(branch) origin = repo.remotes.origin origin.pull()", "from pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan:", "branch): repo_name = self.get_repo_name_from_url(url) if not repo_name: raise Exception(\"Invalid repo\")", "= self.get_repo_name_from_url(url) if not repo_name: raise Exception(\"Invalid repo\") if not", "if last_suffix_index < 0: last_suffix_index = len(url) if last_slash_index <", "class GitRepoMan: def get_repo_name_from_url(self, url: str): if not url: return", "GitRepoMan: def get_repo_name_from_url(self, url: str): if not url: return None", "if not repo_name: raise Exception(\"Invalid repo\") if not 
PFPFFileUtil.is_exist(path): console.success(\"Cloning", "Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name + \" Taking pull...\") repo", "\" + repo_name + \", Branch: \" + branch) Repo.clone_from(url,", "+ branch) Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name + \" Taking", "if not url: return None last_slash_index = url.rfind(\"/\") last_suffix_index =", "to_path=path) else: console.success(repo_name + \" Taking pull...\") repo = Repo(path)", "last_slash_index = url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if last_suffix_index < 0:", "url: str): if not url: return None last_slash_index = url.rfind(\"/\")", "{}\".format(url)) return url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self, path, url, branch):", "= url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if last_suffix_index < 0: last_suffix_index", "0 or last_suffix_index <= last_slash_index: raise Exception(\"Invalid repo url {}\".format(url))", "branch) Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name + \" Taking pull...\")", "import PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self, url: str): if not", "last_suffix_index = url.rfind(\".git\") if last_suffix_index < 0: last_suffix_index = len(url)", "repo_name = self.get_repo_name_from_url(url) if not repo_name: raise Exception(\"Invalid repo\") if", "git import Repo from pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import", "url {}\".format(url)) return url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self, path, url,", "raise Exception(\"Invalid repo\") if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" +", "\", Branch: \" + branch) Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name", "branch=branch, to_path=path) else: console.success(repo_name + \" Taking pull...\") repo =", "Repo from 
pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import PFPFFileUtil class", "path, url, branch): repo_name = self.get_repo_name_from_url(url) if not repo_name: raise", "+ \", Branch: \" + branch) Repo.clone_from(url, branch=branch, to_path=path) else:", "0: last_suffix_index = len(url) if last_slash_index < 0 or last_suffix_index", "def clone_or_pull_project(self, path, url, branch): repo_name = self.get_repo_name_from_url(url) if not", "PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" + repo_name + \", Branch: \"", "console.success(repo_name + \" Taking pull...\") repo = Repo(path) repo.git.checkout(branch) origin", "<= last_slash_index: raise Exception(\"Invalid repo url {}\".format(url)) return url[last_slash_index +", "last_slash_index: raise Exception(\"Invalid repo url {}\".format(url)) return url[last_slash_index + 1:last_suffix_index]", "return None last_slash_index = url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if last_suffix_index", "if last_slash_index < 0 or last_suffix_index <= last_slash_index: raise Exception(\"Invalid", "pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan: def", "last_suffix_index < 0: last_suffix_index = len(url) if last_slash_index < 0", "repo url {}\".format(url)) return url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self, path,", "repo_name + \", Branch: \" + branch) Repo.clone_from(url, branch=branch, to_path=path)", "\" + branch) Repo.clone_from(url, branch=branch, to_path=path) else: console.success(repo_name + \"", "raise Exception(\"Invalid repo url {}\".format(url)) return url[last_slash_index + 1:last_suffix_index] def", "+ \" Taking pull...\") repo = Repo(path) repo.git.checkout(branch) origin =", "1:last_suffix_index] def clone_or_pull_project(self, path, url, branch): repo_name = self.get_repo_name_from_url(url) if", "+ 1:last_suffix_index] def clone_or_pull_project(self, 
path, url, branch): repo_name = self.get_repo_name_from_url(url)", "last_slash_index < 0 or last_suffix_index <= last_slash_index: raise Exception(\"Invalid repo", "not url: return None last_slash_index = url.rfind(\"/\") last_suffix_index = url.rfind(\".git\")", "console.success(\"Cloning project: \" + repo_name + \", Branch: \" +", "repo_name: raise Exception(\"Invalid repo\") if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \"", "from pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self, url: str):", "url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if last_suffix_index < 0: last_suffix_index =", "Exception(\"Invalid repo\") if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" + repo_name", "last_suffix_index = len(url) if last_slash_index < 0 or last_suffix_index <=", "not repo_name: raise Exception(\"Invalid repo\") if not PFPFFileUtil.is_exist(path): console.success(\"Cloning project:", "import console from pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self,", "None last_slash_index = url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if last_suffix_index <", "console from pf_py_file.pfpf_file_util import PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self, url:", "self.get_repo_name_from_url(url) if not repo_name: raise Exception(\"Invalid repo\") if not PFPFFileUtil.is_exist(path):", "\" Taking pull...\") repo = Repo(path) repo.git.checkout(branch) origin = repo.remotes.origin", "url: return None last_slash_index = url.rfind(\"/\") last_suffix_index = url.rfind(\".git\") if", "last_suffix_index <= last_slash_index: raise Exception(\"Invalid repo url {}\".format(url)) return url[last_slash_index", "not PFPFFileUtil.is_exist(path): console.success(\"Cloning project: \" + repo_name + \", Branch:", "PFPFFileUtil class GitRepoMan: def get_repo_name_from_url(self, url: str): if not url:", "else: 
console.success(repo_name + \" Taking pull...\") repo = Repo(path) repo.git.checkout(branch)", "from git import Repo from pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util", "url.rfind(\".git\") if last_suffix_index < 0: last_suffix_index = len(url) if last_slash_index", "str): if not url: return None last_slash_index = url.rfind(\"/\") last_suffix_index", "def get_repo_name_from_url(self, url: str): if not url: return None last_slash_index", "= url.rfind(\".git\") if last_suffix_index < 0: last_suffix_index = len(url) if", "+ repo_name + \", Branch: \" + branch) Repo.clone_from(url, branch=branch,", "= len(url) if last_slash_index < 0 or last_suffix_index <= last_slash_index:", "url, branch): repo_name = self.get_repo_name_from_url(url) if not repo_name: raise Exception(\"Invalid", "import Repo from pf_pweb_sourceman.common.console import console from pf_py_file.pfpf_file_util import PFPFFileUtil", "return url[last_slash_index + 1:last_suffix_index] def clone_or_pull_project(self, path, url, branch): repo_name" ]
[ "= { 'Hostname': [], 'IP': [], 'MAC': [], 'Lastseen': [],", "remote_data_1(): output_1 = stdout_1.readlines() output_2 = stdout_2.readlines() output_3 = stdout_3.readlines()", "-- tells us if the host is actually # reachable", "try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException:", "pass return \"Please Connect to the Internet!\" is_connected() try: ssh", "= ssh.exec_command(\"hostname -I | awk '{print $1}'\") _, stdout_3, _", "[], 'Lastseen': [], 'Status': [], } for i in output:", "\"awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5,", "host -- tells us if the host is actually #", "_ = ssh.exec_command(\"hostname\") _, stdout_2, _ = ssh.exec_command(\"hostname -I |", "'{print $1}'\") _, stdout_3, _ = ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4,", "stdout_5, _ = ssh.exec_command(\"whoami\") _, stdout_6, _ = ssh.exec_command(\"last -F\")", "= stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1 = { 'Hostname': '',", "'IP': '', 'MAC': '', 'OS': '', 'Currentuser': '', } remote_data_1['Hostname']", "remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else:", "{ 'Hostname': '', 'IP': '', 'MAC': '', 'OS': '', 'Currentuser':", "output_1 = stdout_1.readlines() output_2 = stdout_2.readlines() output_3 = stdout_3.readlines() output_4", "_ = ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _ = ssh.exec_command( \"awk", "print $2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _ = ssh.exec_command(\"whoami\") _,", "stdout_1.readlines() output_2 = stdout_2.readlines() output_3 = stdout_3.readlines() output_4 = stdout_4.readlines()", "is_connected(): try: # connect to the host -- tells us", "actually # reachable 
socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to the Internet!\"", "{ 'Hostname': [], 'IP': [], 'MAC': [], 'Lastseen': [], 'Status':", "output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"')", "filter_.append(list(filter(None, i))) for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found')", "stdout_5.readlines() remote_data_1 = { 'Hostname': '', 'IP': '', 'MAC': '',", "to connect to {} due to wrong username/password\".format(Hostname)) exit(1) except:", "username/password\".format(Hostname)) exit(1) except: print(\"Failed to connect to {} \".format(Hostname)) exit(2)", "import socket import paramiko import json Hostname = '172.16.17.32' Username", "remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS']", "= output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) # ---------------------------------- def remote_data_2_(): output", "output: data_.append(i.split(' ')) for i in data_: filter_.append(list(filter(None, i))) for", "except OSError: pass return \"Please Connect to the Internet!\" is_connected()", "#_, stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}'", "'MAC': '', 'OS': '', 'Currentuser': '', } remote_data_1['Hostname'] = output_1[0].strip('\\n')", "= output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) # ----------------------------------", "[], 'IP': [], 'MAC': [], 'Lastseen': [], 'Status': [], }", "[] remote_data_2 = { 'Hostname': [], 'IP': [], 'MAC': [],", "= stdout_6.readlines() data_ = [] filter_ = [] remote_data_2 =", "remote_data_1 = { 'Hostname': '', 'IP': '', 'MAC': 
'', 'OS':", "due to wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed to connect to", "stdout_1, _ = ssh.exec_command(\"hostname\") _, stdout_2, _ = ssh.exec_command(\"hostname -I", "-I | awk '{print $1}'\") _, stdout_3, _ = ssh.exec_command(\"cat", "-F\") _, stdout_7, _ = ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\")", "'', 'IP': '', 'MAC': '', 'OS': '', 'Currentuser': '', }", "| awk '{print $1}'\") _, stdout_3, _ = ssh.exec_command(\"cat /sys/class/net/eth0/address\")", "def remote_data_2_(): output = stdout_6.readlines() data_ = [] filter_ =", "in data_: filter_.append(list(filter(None, i))) for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2])", "'Hostname': '', 'IP': '', 'MAC': '', 'OS': '', 'Currentuser': '',", "to {} due to wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed to", "output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n')", "/sys/class/net/eth0/address\") _, stdout_4, _ = ssh.exec_command( \"awk -F= '$1=={} {{", "stdout_4, _ = ssh.exec_command( \"awk -F= '$1=={} {{ print $2", "the host -- tells us if the host is actually", "remote_data_2_(): output = stdout_6.readlines() data_ = [] filter_ = []", "output_2 = stdout_2.readlines() output_3 = stdout_3.readlines() output_4 = stdout_4.readlines() output_5", "output_4 = stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1 = { 'Hostname':", "ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed to connect to {}", "'ESTABLISHED.*sshd'\") #_, stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format()) # egrep -o", "egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def remote_data_1(): output_1 =", "$1}'\") _, stdout_3, _ = 
ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _", "# ---------------------------------- def remote_data_2_(): output = stdout_6.readlines() data_ = []", "= output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS'] =", "key_filename=key) except paramiko.AuthenticationException: print(\"Failed to connect to {} due to", "'Lastseen': [], 'Status': [], } for i in output: data_.append(i.split('", "{} \".format(Hostname)) exit(2) # commands _, stdout_1, _ = ssh.exec_command(\"hostname\")", "Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: #", "_, stdout_7, _ = ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\") #_,", "= 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: # connect", "# egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def remote_data_1(): output_1", "stdout_3, _ = ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _ = ssh.exec_command(", "us if the host is actually # reachable socket.create_connection((\"8.8.8.8\", 53))", "stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address", "} remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n')", "ssh.exec_command(\"last -F\") _, stdout_7, _ = ssh.exec_command(\"netstat -tnpa | grep", "paramiko import json Hostname = '172.16.17.32' Username = 'ec2-user' key", "host is actually # reachable socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to", "53)) return \"conneted to the Internet!\" except OSError: pass return", "output = stdout_6.readlines() data_ = [] filter_ = [] remote_data_2", "stdout_2.readlines() output_3 = stdout_3.readlines() output_4 = stdout_4.readlines() output_5 = 
stdout_5.readlines()", "stdout_6, _ = ssh.exec_command(\"last -F\") _, stdout_7, _ = ssh.exec_command(\"netstat", "[], 'Status': [], } for i in output: data_.append(i.split(' '))", "paramiko.AuthenticationException: print(\"Failed to connect to {} due to wrong username/password\".format(Hostname))", "$2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _ = ssh.exec_command(\"whoami\") _, stdout_6,", "'MAC': [], 'Lastseen': [], 'Status': [], } for i in", "= '172.16.17.32' Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected():", "'172.16.17.32' Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try:", "ssh.exec_command(\"hostname\") _, stdout_2, _ = ssh.exec_command(\"hostname -I | awk '{print", "= 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: # connect to the host", "paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed to connect", "print(\"Failed to connect to {} due to wrong username/password\".format(Hostname)) exit(1)", "_, stdout_2, _ = ssh.exec_command(\"hostname -I | awk '{print $1}'\")", "_, stdout_6, _ = ssh.exec_command(\"last -F\") _, stdout_7, _ =", "def remote_data_1(): output_1 = stdout_1.readlines() output_2 = stdout_2.readlines() output_3 =", "'IP': [], 'MAC': [], 'Lastseen': [], 'Status': [], } for", "[], 'MAC': [], 'Lastseen': [], 'Status': [], } for i", "awk '{print $1}'\") _, stdout_3, _ = ssh.exec_command(\"cat /sys/class/net/eth0/address\") _,", "'', 'Currentuser': '', } remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n')", "connect to {} due to wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed", "output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') 
remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4)", "remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n') return", "-tnpa | grep 'ESTABLISHED.*sshd'\") #_, stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format())", "ssh.exec_command(\"whoami\") _, stdout_6, _ = ssh.exec_command(\"last -F\") _, stdout_7, _", "is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except", "# --------------------------------- def remote_data_1(): output_1 = stdout_1.readlines() output_2 = stdout_2.readlines()", "username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed to connect to {} due", "= stdout_1.readlines() output_2 = stdout_2.readlines() output_3 = stdout_3.readlines() output_4 =", "'Hostname': [], 'IP': [], 'MAC': [], 'Lastseen': [], 'Status': [],", "= ssh.exec_command( \"awk -F= '$1=={} {{ print $2 ;}}' /etc/os-release\".format('\"NAME\"'))", "Hostname = '172.16.17.32' Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def", "_ = ssh.exec_command(\"last -F\") _, stdout_7, _ = ssh.exec_command(\"netstat -tnpa", "ssh.exec_command(\"sudo {}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def", "= stdout_3.readlines() output_4 = stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1 =", "| grep 'ESTABLISHED.*sshd'\") #_, stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format()) #", "---------------------------------- def remote_data_2_(): output = stdout_6.readlines() data_ = [] filter_", "_ = ssh.exec_command(\"whoami\") _, stdout_6, _ = ssh.exec_command(\"last -F\") _,", "-o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def remote_data_1(): 
output_1 = stdout_1.readlines()", "i))) for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append('", "{{ print $2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _ = ssh.exec_command(\"whoami\")", "commands _, stdout_1, _ = ssh.exec_command(\"hostname\") _, stdout_2, _ =", "to the host -- tells us if the host is", "def is_connected(): try: # connect to the host -- tells", "\"conneted to the Internet!\" except OSError: pass return \"Please Connect", "_, stdout_3, _ = ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _ =", "= ssh.exec_command(\"hostname\") _, stdout_2, _ = ssh.exec_command(\"hostname -I | awk", "_, stdout_1, _ = ssh.exec_command(\"hostname\") _, stdout_2, _ = ssh.exec_command(\"hostname", "'$1=={} {{ print $2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _ =", "output_5 = stdout_5.readlines() remote_data_1 = { 'Hostname': '', 'IP': '',", "found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else: remote_data_2['Status'].append('Inactive')", "import paramiko import json Hostname = '172.16.17.32' Username = 'ec2-user'", "output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) # ---------------------------------- def remote_data_2_(): output =", "= stdout_5.readlines() remote_data_1 = { 'Hostname': '', 'IP': '', 'MAC':", "ssh.exec_command(\"hostname -I | awk '{print $1}'\") _, stdout_3, _ =", "json Hostname = '172.16.17.32' Username = 'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem'", ";}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _ = ssh.exec_command(\"whoami\") _, stdout_6, _", "_ = ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\") #_, stdout_8, _", "_ = ssh.exec_command(\"sudo {}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' 
--IP-address #", "connect to the host -- tells us if the host", "= [] filter_ = [] remote_data_2 = { 'Hostname': [],", "'Status': [], } for i in output: data_.append(i.split(' ')) for", "is actually # reachable socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to the", "range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in", "i in output: data_.append(i.split(' ')) for i in data_: filter_.append(list(filter(None,", "'Currentuser': '', } remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC']", "except paramiko.AuthenticationException: print(\"Failed to connect to {} due to wrong", "in output: data_.append(i.split(' ')) for i in data_: filter_.append(list(filter(None, i)))", "output_3 = stdout_3.readlines() output_4 = stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1", "Internet!\" except OSError: pass return \"Please Connect to the Internet!\"", "[] filter_ = [] remote_data_2 = { 'Hostname': [], 'IP':", "'.join(filter_[i][3:8])) if 'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else: remote_data_2['Status'].append('Inactive') # ssh.close()", "exit(1) except: print(\"Failed to connect to {} \".format(Hostname)) exit(2) #", "import json Hostname = '172.16.17.32' Username = 'ec2-user' key =", "ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed", "remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser']", "# commands _, stdout_1, _ = ssh.exec_command(\"hostname\") _, stdout_2, _", "to {} \".format(Hostname)) 
exit(2) # commands _, stdout_1, _ =", "_ = ssh.exec_command( \"awk -F= '$1=={} {{ print $2 ;}}'", "= [] remote_data_2 = { 'Hostname': [], 'IP': [], 'MAC':", "\".format(Hostname)) exit(2) # commands _, stdout_1, _ = ssh.exec_command(\"hostname\") _,", "= ssh.exec_command(\"sudo {}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # ---------------------------------", "ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _ = ssh.exec_command( \"awk -F= '$1=={}", "if the host is actually # reachable socket.create_connection((\"8.8.8.8\", 53)) return", "to the Internet!\" is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname,", "remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else: remote_data_2['Status'].append('Inactive') #", "the Internet!\" except OSError: pass return \"Please Connect to the", "socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to the Internet!\" except OSError: pass", "= ssh.exec_command(\"whoami\") _, stdout_6, _ = ssh.exec_command(\"last -F\") _, stdout_7,", "'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: # connect to the host --", "remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) # ---------------------------------- def remote_data_2_():", "i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if", "json.dumps(remote_data_1, indent=4) # ---------------------------------- def remote_data_2_(): output = stdout_6.readlines() data_", "remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in 
filter_[i][9]: remote_data_2['Status'].append('Active')", "# connect to the host -- tells us if the", "return json.dumps(remote_data_1, indent=4) # ---------------------------------- def remote_data_2_(): output = stdout_6.readlines()", "data_: filter_.append(list(filter(None, i))) for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not", "'', 'OS': '', 'Currentuser': '', } remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP']", "{} due to wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed to connect", "')) for i in data_: filter_.append(list(filter(None, i))) for i in", "tells us if the host is actually # reachable socket.create_connection((\"8.8.8.8\",", "Internet!\" is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key)", "ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\") #_, stdout_8, _ = ssh.exec_command(\"sudo", "_, stdout_4, _ = ssh.exec_command( \"awk -F= '$1=={} {{ print", "[], } for i in output: data_.append(i.split(' ')) for i", "_, stdout_5, _ = ssh.exec_command(\"whoami\") _, stdout_6, _ = ssh.exec_command(\"last", "-F= '$1=={} {{ print $2 ;}}' /etc/os-release\".format('\"NAME\"')) _, stdout_5, _", "return \"conneted to the Internet!\" except OSError: pass return \"Please", "wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed to connect to {} \".format(Hostname))", "to the Internet!\" except OSError: pass return \"Please Connect to", "} for i in output: data_.append(i.split(' ')) for i in", "to connect to {} \".format(Hostname)) exit(2) # commands _, stdout_1,", "stdout_7, _ = ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\") #_, stdout_8,", "= ssh.exec_command(\"cat /sys/class/net/eth0/address\") _, stdout_4, _ = ssh.exec_command( \"awk -F=", "grep 
'ESTABLISHED.*sshd'\") #_, stdout_8, _ = ssh.exec_command(\"sudo {}/24\".format()) # egrep", "print(\"Failed to connect to {} \".format(Hostname)) exit(2) # commands _,", "/etc/os-release\".format('\"NAME\"')) _, stdout_5, _ = ssh.exec_command(\"whoami\") _, stdout_6, _ =", "output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) # ---------------------------------- def", "return \"Please Connect to the Internet!\" is_connected() try: ssh =", "'([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def remote_data_1(): output_1 = stdout_1.readlines() output_2", "for i in output: data_.append(i.split(' ')) for i in data_:", "'', 'MAC': '', 'OS': '', 'Currentuser': '', } remote_data_1['Hostname'] =", "= paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed to", "stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1 = { 'Hostname': '', 'IP':", "--------------------------------- def remote_data_1(): output_1 = stdout_1.readlines() output_2 = stdout_2.readlines() output_3", "connect to {} \".format(Hostname)) exit(2) # commands _, stdout_1, _", "stdout_6.readlines() data_ = [] filter_ = [] remote_data_2 = {", "= { 'Hostname': '', 'IP': '', 'MAC': '', 'OS': '',", "key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: # connect to the", "reachable socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to the Internet!\" except OSError:", "'OS': '', 'Currentuser': '', } remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] =", "ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username, key_filename=key) except paramiko.AuthenticationException: print(\"Failed to connect to", "i in data_: 
filter_.append(list(filter(None, i))) for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0])", "for i in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8]))", "'ec2-user' key = 'G:/Projects/Python/Asset-Discovery-Tool/tool/s.pem' def is_connected(): try: # connect to", "\"Please Connect to the Internet!\" is_connected() try: ssh = paramiko.SSHClient()", "the Internet!\" is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) ssh.connect(hostname=Hostname, username=Username,", "= ssh.exec_command(\"last -F\") _, stdout_7, _ = ssh.exec_command(\"netstat -tnpa |", "indent=4) # ---------------------------------- def remote_data_2_(): output = stdout_6.readlines() data_ =", "for i in data_: filter_.append(list(filter(None, i))) for i in range(len(filter_)-3):", "# reachable socket.create_connection((\"8.8.8.8\", 53)) return \"conneted to the Internet!\" except", "_ = ssh.exec_command(\"hostname -I | awk '{print $1}'\") _, stdout_3,", "stdout_2, _ = ssh.exec_command(\"hostname -I | awk '{print $1}'\") _,", "OSError: pass return \"Please Connect to the Internet!\" is_connected() try:", "remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged' in filter_[i][9]:", "data_ = [] filter_ = [] remote_data_2 = { 'Hostname':", "in range(len(filter_)-3): remote_data_2['Hostname'].append(filter_[i][0]) remote_data_2['IP'].append(filter_[i][2]) remote_data_2['MAC'].append('not found') remote_data_2['Lastseen'].append(' '.join(filter_[i][3:8])) if 'logged'", "<filename>tool/remote_info.py import socket import paramiko import json Hostname = '172.16.17.32'", "ssh.exec_command( \"awk -F= '$1=={} {{ print $2 ;}}' 
/etc/os-release\".format('\"NAME\"')) _,", "data_.append(i.split(' ')) for i in data_: filter_.append(list(filter(None, i))) for i", "{}/24\".format()) # egrep -o '([0-9]{1,3}\\.){3}[0-9]{1,3}' --IP-address # --------------------------------- def remote_data_1():", "exit(2) # commands _, stdout_1, _ = ssh.exec_command(\"hostname\") _, stdout_2,", "= ssh.exec_command(\"netstat -tnpa | grep 'ESTABLISHED.*sshd'\") #_, stdout_8, _ =", "'', } remote_data_1['Hostname'] = output_1[0].strip('\\n') remote_data_1['IP'] = output_2[0].strip('\\n') remote_data_1['MAC'] =", "remote_data_2 = { 'Hostname': [], 'IP': [], 'MAC': [], 'Lastseen':", "to wrong username/password\".format(Hostname)) exit(1) except: print(\"Failed to connect to {}", "= stdout_2.readlines() output_3 = stdout_3.readlines() output_4 = stdout_4.readlines() output_5 =", "if 'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else: remote_data_2['Status'].append('Inactive') # ssh.close() return", "the host is actually # reachable socket.create_connection((\"8.8.8.8\", 53)) return \"conneted", "socket import paramiko import json Hostname = '172.16.17.32' Username =", "except: print(\"Failed to connect to {} \".format(Hostname)) exit(2) # commands", "filter_ = [] remote_data_2 = { 'Hostname': [], 'IP': [],", "try: # connect to the host -- tells us if", "stdout_3.readlines() output_4 = stdout_4.readlines() output_5 = stdout_5.readlines() remote_data_1 = {", "'logged' in filter_[i][9]: remote_data_2['Status'].append('Active') else: remote_data_2['Status'].append('Inactive') # ssh.close() return remote_data_2", "Connect to the Internet!\" is_connected() try: ssh = paramiko.SSHClient() ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())", "= output_3[0].strip('\\n') remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1,", "= output_2[0].strip('\\n') remote_data_1['MAC'] = output_3[0].strip('\\n') 
remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] =", "--IP-address # --------------------------------- def remote_data_1(): output_1 = stdout_1.readlines() output_2 =", "remote_data_1['OS'] = output_4[0][1:-1].strip('\\\"') remote_data_1['Currentuser'] = output_5[0].strip('\\n') return json.dumps(remote_data_1, indent=4) #" ]
[ "Path :type path: str | unicode :param secret: The contents", "secret's latest version at the specified location. This marks the", "= {} if version is not None: params['version'] = version", "key will require the cas parameter to be set on", "response of the request. :rtype: dict \"\"\" api_path = utils.format_url(", "Supported methods: DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body) :param path:", "all version data for the specified key. All version history", "dict \"\"\" params = { 'options': {}, 'data': secret, }", "will be returned on normal get requests. :type versions: list", "'argument to \"versions\" must be a list containing one or", "an existing path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces: 204 (empty", "If the value already exists, the calling token must have", "the write will only be allowed if the key's current", "\"\"\"Read the KV Version 2 configuration. Supported methods: GET: /auth/{mount_point}/config.", "str | unicode :param secret: The contents of the \"secret\"", "return response.json() def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a", "data, allowing it to be returned on get requests. Supported", "key has more than the configured allowed versions the oldest", "max_versions: int :param cas_required: If true the key will require", "Specifies the path of the secret to delete. This is", "undelete path. Supported methods: POST: /{mount_point}/delete/{path}. 
Produces: 204 (empty body)", "} if cas is not None: params['options']['cas'] = cas api_path", "cas: Set the \"cas\" value to use a Check-And-Set operation.", "= version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get(", "'bool expected for cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required']", ":param path: Path :type path: str | unicode :param max_versions:", ":type path: str | unicode :param mount_point: The \"path\" the", "the underlying data will not be removed. A delete can", "is not None: params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point,", "unicode :param secret: The contents of the \"secret\" dict will", "-*- coding: utf-8 -*- \"\"\"KvV2 methods module.\"\"\" from hvac import", "json=params, ) return response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set", "specified path. Supported methods: GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param", "be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body)", "location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path:", "utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path,", "path: str | unicode :param version: Specifies the version to", "# Write back updated secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret,", "# Update existing secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) #", "request. 
:rtype: requests.Response \"\"\" params = { 'max_versions': max_versions, }", "configuration will be used. :type cas_required: bool :param mount_point: The", "\"\"\"Set or update data in the KV store without overwriting.", "path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key metadata and all version", "be a list containing one or more integers, \"{versions}\" provided.'.format(", "path: Specifies the path of the secret to undelete. This", "url=api_path, ) return response.json() def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):", ":param mount_point: The \"path\" the secret engine was mounted on.", "history will be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204", "max version is used. Once a key has more than", "= max_versions if cas_required is not None: if not isinstance(cas_required,", "\"versions\" must be a list containing one or more integers,", "def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for the", "The JSON response of the request. :rtype: dict \"\"\" params", "requests. :type versions: int :param mount_point: The \"path\" the secret", ":type versions: list of int :param mount_point: The \"path\" the", "\"\"\"Retrieve the metadata and versions for the secret at the", "'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets Engine - Version 2 (API).", "Specifies the path of the secrets to list. This is", "must have an ACL policy granting the create capability. If", ":type mount_point: str | unicode :return: The JSON response of", "/. The input must be a folder; list on a", ":param cas: Set the \"cas\" value to use a Check-And-Set", "the create capability. If the value already exists, the calling", "be deleted. The versioned data will not be deleted, but", "specified location. 
This marks the version as deleted and will", "If the index is non-zero the write will only be", "versions the oldest version will be permanently deleted. :type max_versions:", "the key-value store. Supported methods: POST: /{mount_point}/config. Produces: 204 (empty", "destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified version data", "the path of the secret to delete. This is specified", "be returned in normal get requests. :type versions: int :param", "be used. :type cas_required: bool :param mount_point: The \"path\" the", "without overwriting. :param path: Path :type path: str | unicode", "versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, }", "methods: GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the", "\"\"\"Return a list of key names at the specified location.", "The \"path\" the secret engine was mounted on. :type mount_point:", "POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body) :param path: Specifies the", "= utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def", "on all write requests. :type cas_required: bool :param mount_point: The", "no longer be returned in normal get requests. :type versions:", "int :param secret: The contents of the \"secret\" dict will", "to be returned on get requests. Supported methods: POST: /{mount_point}/undelete/{path}.", "exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at \"{path}\"; patch only works", "secret at the specified location. 
If the value does not", "path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and versions for the secret", "utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post( url=api_path, json=params, ) return", ":type mount_point: str | unicode :return: The response of the", "policy granting the create capability. If the value already exists,", "the key doesn't exist. If the index is non-zero the", "of versions to keep per key. If not set, the", "Note that no policy-based filtering is performed on keys; do", "latest version is returned. :type version: int :param mount_point: The", "be undone using the undelete path. Supported methods: DELETE: /{mount_point}/data/{path}.", "/{mount_point}/delete/{path}. Produces: 204 (empty body) :param path: Specifies the path", "error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required)) raise", "the path of the secret to undelete. This is specified", "the path of the secret to read. This is specified", "undelete. The versions will be restored and their data will", "| unicode :param max_versions: The number of versions to keep", "url=api_path, json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list", "restored and their data will be returned on normal get", "= self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No value", "(permanently) the key metadata and all version data for the", "of the request. :rtype: requests.Response \"\"\" params = { 'max_versions':", ":type path: str | unicode :param cas: Set the \"cas\"", "self._adapter.post( url=api_path, json=params, ) return response.json() def patch(self, path, secret,", "0: error_msg = 'argument to \"versions\" must be a list", "already exists, the calling token must have an ACL policy", "max_versions: The number of versions to keep per key. 
This", "version as deleted and will stop it from being returned", ":rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete(", "version is used. Once a key has more than the", "max_versions, } if cas_required is not None: params['cas_required'] = cas_required", "ACL policy granting the create capability. If the value already", "methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body) :param path: Specifies", "json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version 2", "= utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post( url=api_path, json=params, )", "The response of the request. :rtype: requests.Response \"\"\" params =", "token must have an ACL policy granting the update capability.", "200 application/json :param path: Path :type path: str | unicode", "path. Supported methods: POST: /{mount_point}/delete/{path}. Produces: 204 (empty body) :param", "VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets Engine -", "underlying data will not be removed. A delete can be", "to keep per key. This value applies to all keys,", "path from the key-value store. Supported methods: POST: /{mount_point}/destroy/{path}. 
Produces:", "path: str | unicode :param secret: The contents of the", "api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, )", "def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified version", "int :param mount_point: The \"path\" the secret engine was mounted", "None: params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post(", "version at the specified location. This marks the version as", "<filename>hvac/api/secrets_engines/kv_v2.py #!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"KvV2 methods", "If true the key will require the cas parameter to", "than the configured allowed versions the oldest version will be", "provided version and path in the key-value store. This restores", ":type path: str | unicode :param version: Specifies the version", "Supported methods: GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies", "self._adapter.list( url=api_path, ) return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve", "the oldest version will be permanently deleted. :type max_versions: int", "remove the specified version data and numbers for the provided", "parameter to be set on all write requests. :type cas_required:", "path: Specifies the path of the secret to delete. This", "None: params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response", "be deleted, but it will no longer be returned in", "requests. :type cas_required: bool :param mount_point: The \"path\" the secret", "if cas_required is not None: params['cas_required'] = cas_required api_path =", "undelete path. Supported methods: DELETE: /{mount_point}/data/{path}. 
Produces: 204 (empty body)", "request. :rtype: dict \"\"\" api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, )", "an ACL policy granting the update capability. Supported methods: POST:", "unicode :return: The response of the request. :rtype: requests.Response \"\"\"", "calling token must have an ACL policy granting the update", "secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self,", "data and numbers for the provided path from the key-value", "Once a key has more than the configured allowed versions", "the request. :rtype: dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)", "/{mount_point}/data/{path}. Produces: 204 (empty body) :param path: Specifies the path", "data in the KV store without overwriting. :param path: Path", "Supported methods: GET: /auth/{mount_point}/config. Produces: 200 application/json :param mount_point: The", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for the provided version and path", "path: Specifies the path of the secret to destroy. This", "https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level", "param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path =", "{ 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return", "methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Specifies", "return. If not set the latest version is returned. :type", "'data': secret, } if cas is not None: params['options']['cas'] =", "latest version at the specified location. 
This marks the version", "capability. If the value already exists, the calling token must", "module.\"\"\" from hvac import exceptions, utils from hvac.api.vault_api_base import VaultApiBase", "delete. This is specified as part of the URL. :type", "secret to read. This is specified as part of the", "and their data will be returned on normal get requests.", "the specified location. If the value does not yet exist,", "URL. :type path: str | unicode :param versions: The versions", "cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings that are applied to", "write will only be allowed if the key's current version", "response of the create_or_update_secret request. :rtype: dict \"\"\" # First,", ") def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for", "POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Path :type", "params = {} if max_versions is not None: params['max_versions'] =", "deleted. :type versions: list of int :param mount_point: The \"path\"", "def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the", "delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the secret's", ":param versions: The versions to be deleted. The versioned data", "max_versions: int :param cas_required: If true all keys will require", "request. :rtype: requests.Response \"\"\" if not isinstance(versions, list) or len(versions)", "to destroy. Their data will be permanently deleted. :type versions:", "200 application/json :param path: Specifies the path of the secrets", "data for the specified key. All version history will be", "value. Once a key has more than the configured allowed", "to every key in the key-value store. 
Supported methods: POST:", "= utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, params=params, )", "list of key names at the specified location. Folders are", "to delete. This is specified as part of the URL.", "url=api_path, params=params, ) return response.json() def create_or_update_secret(self, path, secret, cas=None,", "version matches the version specified in the cas parameter. :type", "doesn't exist. If the index is non-zero the write will", "delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key metadata and all", "secret to undelete. This is specified as part of the", "| unicode :param cas: Set the \"cas\" value to use", "the specified version data and numbers for the provided path", "is non-zero the write will only be allowed if the", "/{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Path :type path:", ":type path: str | unicode :param secret: The contents of", ":param path: Specifies the path of the secrets to list.", "not return a value. Note that no policy-based filtering is", "to keep per key. If not set, the backend's configured", "version specified in the cas parameter. :type cas: int :param", "write will be allowed. If set to 0 a write", "secret to destroy. This is specified as part of the", "the version to return. If not set the latest version", "of the request. :rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point,", "location. If the value does not yet exist, the calling", "If not set, the backend's configured max version is used.", "Specifies the path of the secret to destroy. This is", "soft delete of the specified versions of the secret. This", "and will stop it from being returned from reads, but", "names at the specified location. Folders are suffixed with /.", "be returned on get requests. 
Supported methods: POST: /{mount_point}/undelete/{path}. Produces:", "longer be returned in normal get requests. :type versions: int", "If true all keys will require the cas parameter to", "utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, ) return response.json()", "the undelete path. Supported methods: POST: /{mount_point}/delete/{path}. Produces: 204 (empty", "return response.json() def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the", "cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params,", "dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list(", "a key has more than the configured allowed versions the", "the value does not yet exist, the calling token must", "filtering is performed on keys; do not encode sensitive information", "the index is non-zero the write will only be allowed", "key names. The values themselves are not accessible via this", "dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get(", "keys, but a key's metadata setting can overwrite this value.", "self._adapter.get( url=api_path, ) return response.json() def update_metadata(self, path, max_versions=None, cas_required=None,", "per key. If not set, the backend's configured max version", "path=path) return self._adapter.post( url=api_path, json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT):", "params = {} if version is not None: params['version'] =", "is returned. 
:type version: int :param mount_point: The \"path\" the", "params=params, ) return response.json() def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT):", "secret at the specified path. Supported methods: GET: /{mount_point}/metadata/{path}. Produces:", "= { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path)", "not set the write will be allowed. If set to", "keep per key. This value applies to all keys, but", "= current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated secret. return self.create_or_update_secret(", "used. Once a key has more than the configured allowed", "one or more integers, \"{versions}\" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg)", "version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at the specified location. Supported", "not be deleted, but it will no longer be returned", "path. Supported methods: GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path:", "it to be returned on get requests. Supported methods: POST:", "and versions for the secret at the specified path. Supported", "settings that are applied to every key in the key-value", "The values themselves are not accessible via this command. Supported", "def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at the", ":param max_versions: The number of versions to keep per key.", "key. If not set, the backend's configured max version is", "only works on existing data.'.format(path=path)) # Update existing secret dict.", "response of the request. 
:rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}',", "list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of key names at", "versions as deleted and will stop them from being returned", "mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at \"{path}\";", "allowed if the key's current version matches the version specified", "on an existing path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces: 204", "/{mount_point}/config. Produces: 204 (empty body) :param max_versions: The number of", "every key in the key-value store. Supported methods: POST: /{mount_point}/config.", "secret to delete. This is specified as part of the", "requests. If false, the backend's configuration will be used. :type", "if the key's current version matches the version specified in", ":return: The response of the request. :rtype: requests.Response \"\"\" params", "versions of the secret. This marks the versions as deleted", "response of the request. :rtype: requests.Response \"\"\" if not isinstance(versions,", "\"\"\" def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings", "of the secret to undelete. This is specified as part", "list of int :param mount_point: The \"path\" the secret engine", "read. :type secret: dict :param mount_point: The \"path\" the secret", "values themselves are not accessible via this command. Supported methods:", "but the underlying data will not be removed. A delete", "path of the secret to destroy. This is specified as", "metadata and versions for the secret at the specified path.", "| unicode :return: The response of the request. :rtype: requests.Response", "current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated secret. return self.create_or_update_secret( path=path,", "the request. 
:rtype: dict \"\"\" api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point,", "methods: POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Path", "read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and versions for the", "current version matches the version specified in the cas parameter.", "of the create_or_update_secret request. :rtype: dict \"\"\" # First, do", "api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params, ) def", "\"\"\"KvV2 methods module.\"\"\" from hvac import exceptions, utils from hvac.api.vault_api_base", "returned in normal get requests. :type versions: int :param mount_point:", "utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, ) return response.json()", "applied to every key in the key-value store. Supported methods:", "api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, ) def", ":rtype: dict \"\"\" api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response", "not None: params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)", "= self._adapter.get( url=api_path, ) return response.json() def update_metadata(self, path, max_versions=None,", "application/json :param path: Path :type path: str | unicode :param", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of key names at the specified", "This marks the version as deleted and will stop it", "as part of the URL. :type path: str | unicode", "the specified path. Supported methods: GET: /{mount_point}/metadata/{path}. 
Produces: 200 application/json", "params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path,", "patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated secret. return", "def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of", "path=path) response = self._adapter.post( url=api_path, json=params, ) return response.json() def", "Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend", "versions to keep per key. If not set, the backend's", "the secret at the specified path. Supported methods: GET: /{mount_point}/metadata/{path}.", "be permanently deleted. :type max_versions: int :param cas_required: If true", "response = self._adapter.post( url=api_path, json=params, ) return response.json() def patch(self,", "be a folder; list on a file will not return", "will be permanently deleted. :type versions: list of int :param", "integers, \"{versions}\" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = {", ") return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata", "configured allowed versions the oldest version will be permanently deleted.", "JSON response of the create_or_update_secret request. :rtype: dict \"\"\" #", "the undelete path. Supported methods: DELETE: /{mount_point}/data/{path}. Produces: 204 (empty", "\"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path,", "the latest version is returned. 
:type version: int :param mount_point:", ":return: The JSON response of the create_or_update_secret request. :rtype: dict", "the provided path from the key-value store. Supported methods: POST:", "a read. try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, ) except", "on all write requests. If false, the backend's configuration will", ":param path: Specifies the path of the secret to destroy.", "the secret to read. This is specified as part of", "the specified location. This marks the version as deleted and", "| unicode :param version: Specifies the version to return. If", "max_versions is not None: params['max_versions'] = max_versions if cas_required is", "raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)", ":type version: int :param mount_point: The \"path\" the secret engine", "the request. :rtype: requests.Response \"\"\" if not isinstance(versions, list) or", "path: str | unicode :param versions: The versions to be", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key metadata and all version data", "\"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, )", "(empty body) :param path: Specifies the path of the secret", "self._adapter.get( url=api_path, params=params, ) return response.json() def create_or_update_secret(self, path, secret,", "list) or len(versions) == 0: error_msg = 'argument to \"versions\"", "methods: POST: /{mount_point}/data/{path}. 
Produces: 200 application/json :param path: Path :type", "response = self._adapter.list( url=api_path, ) return response.json() def read_secret_metadata(self, path,", "= utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def", "The versions to destroy. Their data will be permanently deleted.", "not yet exist, the calling token must have an ACL", ":type path: str | unicode :param max_versions: The number of", "JSON response of the request. :rtype: dict \"\"\" api_path =", "isinstance(versions, list) or len(versions) == 0: error_msg = 'argument to", ":rtype: requests.Response \"\"\" params = {} if max_versions is not", "methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the", "of the secret to read. This is specified as part", "str | unicode :param cas: Set the \"cas\" value to", "url=api_path, json=params, ) def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove", "api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path) return", "folder; list on a file will not return a value.", "data for the provided version and path in the key-value", "URL. :type path: str | unicode :param version: Specifies the", ") def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key metadata", "a list of key names at the specified location. Folders", "def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key metadata and", "permanently deleted. 
:type versions: list of int :param mount_point: The", "'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post(", "return self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):", "a key's metadata setting can overwrite this value. Once a", "be returned on normal get requests. :type versions: list of", "str | unicode :param max_versions: The number of versions to", "url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the", "deleted and will stop them from being returned from reads,", "specified key. All version history will be removed. Supported methods:", "self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently)", "the path of the secrets to list. This is specified", "path: str | unicode :param versions: The versions to destroy.", "dict \"\"\" api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response =", "all keys, but a key's metadata setting can overwrite this", "{ 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version of a secret at the", "in the KV store without overwriting. :param path: Path :type", "versions to keep per key. This value applies to all", "all write requests. If false, the backend's configuration will be", "not None: params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)", "the secret's latest version at the specified location. This marks", "versions: The versions to destroy. 
Their data will be permanently", "themselves are not accessible via this command. Supported methods: LIST:", "value found at \"{path}\"; patch only works on existing data.'.format(path=path))", "being returned from reads, but the underlying data will not", "try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise", "sensitive information in key names. The values themselves are not", "versions: int :param mount_point: The \"path\" the secret engine was", "str | unicode :return: The JSON response of the request.", "backend's configuration will be used. :type cas_required: bool :param mount_point:", "key-value store. Supported methods: POST: /{mount_point}/config. Produces: 204 (empty body)", "str | unicode :param mount_point: The \"path\" the secret engine", "Set the \"cas\" value to use a Check-And-Set operation. If", "not None: params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return", "keep per key. If not set, the backend's configured max", "of the request. :rtype: requests.Response \"\"\" if not isinstance(versions, list)", "are suffixed with /. The input must be a folder;", "the path of the secret to destroy. This is specified", "Specifies the version to return. If not set the latest", "configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings that are", "If false, the backend's configuration will be used. :type cas_required:", "exist, the calling token must have an ACL policy granting", "get requests. :type versions: list of int :param mount_point: The", "operation. If not set the write will be allowed. If", "path: Specifies the path of the secret to read. This", "part of the URL. :type path: str | unicode :param", "be set on all write requests. 
If false, the backend's", "more integers, \"{versions}\" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params =", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version 2 configuration. Supported methods: GET:", "self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a", "params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response =", ":param secret: The contents of the \"secret\" dict will be", "the request. :rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path)", "is not None: params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point)", "the secret to delete. This is specified as part of", "metadata and all version data for the specified key. All", "max_versions if cas_required is not None: if not isinstance(cas_required, bool):", "cas_required: bool :param mount_point: The \"path\" the secret engine was", "params = { 'options': {}, 'data': secret, } if cas", "contents of the \"secret\" dict will be stored and returned", "metadata setting can overwrite this value. Once a key has", "mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path,", "\"{path}\"; patch only works on existing data.'.format(path=path)) # Update existing", "mount_point=mount_point) return self._adapter.post( url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read", "of the secret to destroy. This is specified as part", "Secrets Engine - Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def", "request. 
:rtype: dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response", "patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data in the", "GET: /auth/{mount_point}/config. Produces: 200 application/json :param mount_point: The \"path\" the", "deleted, but it will no longer be returned in normal", "cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of cas_required setting on an", "mount_point: str | unicode :return: The JSON response of the", "If set to 0 a write will only be allowed", "require the cas parameter to be set on all write", "\"{versions}\" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions':", "in the key-value store. This restores the data, allowing it", "the KV Version 2 configuration. Supported methods: GET: /auth/{mount_point}/config. Produces:", "using the undelete path. Supported methods: DELETE: /{mount_point}/data/{path}. Produces: 204", "path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version of a", "will be stored and returned on read. :type secret: dict", "exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return", "None: params['max_versions'] = max_versions if cas_required is not None: if", "path of the secret to undelete. This is specified as", "hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets", "to be deleted. The versioned data will not be deleted,", "versions: The versions to be deleted. The versioned data will", "key. All version history will be removed. 
Supported methods: DELETE:", "no policy-based filtering is performed on keys; do not encode", "response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at", "at \"{path}\"; patch only works on existing data.'.format(path=path)) # Update", "not encode sensitive information in key names. The values themselves", "a soft delete of the secret's latest version at the", "setting on an existing path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces:", "utf-8 -*- \"\"\"KvV2 methods module.\"\"\" from hvac import exceptions, utils", "Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body) :param path:", "is not None: if not isinstance(cas_required, bool): error_msg = 'bool", "= utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, ) return", "mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self, path,", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified version data and numbers for", "update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of cas_required", "\"\"\"Permanently remove the specified version data and numbers for the", "Path :type path: str | unicode :param cas: Set the", "configuration. Supported methods: GET: /auth/{mount_point}/config. Produces: 200 application/json :param mount_point:", "path: Specifies the path of the secrets to list. This", "cas: int :param secret: The contents of the \"secret\" dict", "the URL. :type path: str | unicode :param versions: The", "matches the version specified in the cas parameter. :type cas:", "not be removed. 
A delete can be undone using the", "return self._adapter.post( url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the", "and returned on read. :type secret: dict :param mount_point: The", "marks the versions as deleted and will stop them from", "\"\"\"Configure backend level settings that are applied to every key", "requests. Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body) :param", "to list. This is specified as part of the URL.", "GET: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the path", "set on all write requests. :type cas_required: bool :param mount_point:", "= self._adapter.get( url=api_path, params=params, ) return response.json() def create_or_update_secret(self, path,", "store without overwriting. :param path: Path :type path: str |", "returned on normal get requests. :type versions: list of int", "str | unicode :param versions: The versions to undelete. The", "the specified key. All version history will be removed. Supported", "backend level settings that are applied to every key in", ":return: The response of the request. :rtype: requests.Response \"\"\" api_path", "version to return. If not set the latest version is", "data will be returned on normal get requests. :type versions:", "that no policy-based filtering is performed on keys; do not", "the oldest version will be permanently deleted. Defaults to 10.", "This restores the data, allowing it to be returned on", "(API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure", "path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data in the KV", ") def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete", "command. Supported methods: LIST: /{mount_point}/metadata/{path}. 
Produces: 200 application/json :param path:", "(empty body) :param path: Path :type path: str | unicode", "secret: The contents of the \"secret\" dict will be stored", "def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of key names", "mount_point=mount_point, ) response = self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path,", "a Check-And-Set operation. If not set the write will be", "mount_point=mount_point, path=path) response = self._adapter.post( url=api_path, json=params, ) return response.json()", "allowed versions the oldest version will be permanently deleted. :type", "methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body) :param path: Specifies", "{} if max_versions is not None: params['max_versions'] = max_versions if", "mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path, versions,", "more than the configured allowed versions the oldest version will", "path=path) return self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self, path, versions,", "get requests. Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty body)", "| unicode :param versions: The versions to be deleted. The", "path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of key names at the", "specified location. Folders are suffixed with /. The input must", ":param path: Specifies the path of the secret to read.", "First, do a read. try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point,", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and versions for the secret at", "204 (empty body) :param max_versions: The number of versions to", "restores the data, allowing it to be returned on get", "response.json() def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new", "used. 
:type cas_required: bool :param mount_point: The \"path\" the secret", "Version 2 configuration. Supported methods: GET: /auth/{mount_point}/config. Produces: 200 application/json", "are applied to every key in the key-value store. Supported", "the cas parameter. :type cas: int :param secret: The contents", "reads, but the underlying data will not be removed. A", "for the secret at the specified path. Supported methods: GET:", "secret, } if cas is not None: params['options']['cas'] = cas", "\"\"\" if not isinstance(versions, list) or len(versions) == 0: error_msg", "works on existing data.'.format(path=path)) # Update existing secret dict. patched_secret", "return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path,", "create_or_update_secret request. :rtype: dict \"\"\" # First, do a read.", "will be used. :type cas_required: bool :param mount_point: The \"path\"", "key metadata and all version data for the specified key.", "= { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path)", "versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path,", ":return: The JSON response of the request. :rtype: dict \"\"\"", "has more than the configured allowed versions the oldest version", "return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and", "unicode :param max_versions: The number of versions to keep per", "This value applies to all keys, but a key's metadata", "will be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. 
Produces: 204 (empty", "performed on keys; do not encode sensitive information in key", "from reads, but the underlying data will not be removed.", "Produces: 204 (empty body) :param path: Specifies the path of", "return response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret", "body) :param path: Path :type path: str | unicode :param", "def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and versions for", "cas_required setting on an existing path. Supported methods: POST: /{mount_point}/metadata/{path}.", "per key. This value applies to all keys, but a", "read. try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath:", "to be set on all write requests. If false, the", "methods: GET: /auth/{mount_point}/config. Produces: 200 application/json :param mount_point: The \"path\"", "returned on read. :type secret: dict :param mount_point: The \"path\"", "be removed. A delete can be undone using the undelete", "Specifies the path of the secret to undelete. This is", "not set the latest version is returned. :type version: int", "of int :param mount_point: The \"path\" the secret engine was", "self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete", "will not be deleted, but it will no longer be", "value already exists, the calling token must have an ACL", "exceptions.InvalidPath('No value found at \"{path}\"; patch only works on existing", "cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a", "set, the backend's configured max version is used. 
Once a", "does not yet exist, the calling token must have an", "= utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path) return response.json()", ") return response.json() def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates", "undone using the undelete path. Supported methods: POST: /{mount_point}/delete/{path}. Produces:", "methods: POST: /{mount_point}/delete/{path}. Produces: 204 (empty body) :param path: Specifies", "have an ACL policy granting the update capability. Supported methods:", "of the request. :rtype: dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point,", "version is not None: params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}',", ") response = self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path, version=None,", "{ 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return", "path. Supported methods: DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body) :param", ":rtype: requests.Response \"\"\" params = { 'max_versions': max_versions, } if", "python # -*- coding: utf-8 -*- \"\"\"KvV2 methods module.\"\"\" from", "request. :rtype: dict \"\"\" # First, do a read. try:", "path of the secret to read. This is specified as", "len(versions) == 0: error_msg = 'argument to \"versions\" must be", "All version history will be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}.", "unicode :param versions: The versions to undelete. The versions will", "'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post(", "methods: DELETE: /{mount_point}/data/{path}. 
Produces: 204 (empty body) :param path: Specifies", "if cas is not None: params['options']['cas'] = cas api_path =", "unicode :param versions: The versions to destroy. Their data will", "value applies to all keys, but a key's metadata setting", "updated secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def", "self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the", "The input must be a folder; list on a file", "-*- \"\"\"KvV2 methods module.\"\"\" from hvac import exceptions, utils from", "return response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update", "are not accessible via this command. Supported methods: LIST: /{mount_point}/metadata/{path}.", "of the request. :rtype: dict \"\"\" params = {} if", "versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for the provided version and", "or more integers, \"{versions}\" provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params", "None: params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response", "JSON response of the request. :rtype: dict \"\"\" params =", "to undelete. This is specified as part of the URL.", "unicode :return: The JSON response of the create_or_update_secret request. :rtype:", "{}, 'data': secret, } if cas is not None: params['options']['cas']", "def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version", "of the request. :rtype: dict \"\"\" api_path = utils.format_url( '/v1/{mount_point}/config',", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings that are applied to every", "version of a secret at the specified location. 
If the", "be stored and returned on read. :type secret: dict :param", "will stop them from being returned from reads, but the", "POST: /{mount_point}/delete/{path}. Produces: 204 (empty body) :param path: Specifies the", "versions to be deleted. The versioned data will not be", "the key-value store. Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty", "to destroy. This is specified as part of the URL.", "allowing it to be returned on get requests. Supported methods:", "the cas parameter to be set on all write requests.", "def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version 2 configuration. Supported", "configured max version is used. Once a key has more", "on read. :type secret: dict :param mount_point: The \"path\" the", "path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for the provided version", "to 0 a write will only be allowed if the", "DELETE: /{mount_point}/data/{path}. Produces: 204 (empty body) :param path: Specifies the", "body) :param path: Specifies the path of the secret to", "requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path,", "\"\"\"Undelete the data for the provided version and path in", "path=path) return self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT):", "delete of the specified versions of the secret. This marks", "DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Specifies the", "the request. :rtype: requests.Response \"\"\" params = { 'max_versions': max_versions,", "value. 
Note that no policy-based filtering is performed on keys;", "but it will no longer be returned in normal get", "exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}',", "stop it from being returned from reads, but the underlying", "api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, )", ") return response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or", "not isinstance(versions, list) or len(versions) == 0: error_msg = 'argument", "import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets Engine", ") except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at \"{path}\"; patch", "the secret at the specified location. Supported methods: GET: /{mount_point}/data/{path}.", "be undone using the undelete path. Supported methods: POST: /{mount_point}/delete/{path}.", "will stop it from being returned from reads, but the", "The contents of the \"secret\" dict will be stored and", "coding: utf-8 -*- \"\"\"KvV2 methods module.\"\"\" from hvac import exceptions,", "\"\"\"Delete (permanently) the key metadata and all version data for", "can overwrite this value. Once a key has more than", "\"\"\" params = { 'options': {}, 'data': secret, } if", "path: Path :type path: str | unicode :param cas: Set", "methods module.\"\"\" from hvac import exceptions, utils from hvac.api.vault_api_base import", "delete of the secret's latest version at the specified location.", "version is returned. :type version: int :param mount_point: The \"path\"", ":type versions: int :param mount_point: The \"path\" the secret engine", "allowed. 
If set to 0 a write will only be", "return self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):", "must be a list containing one or more integers, \"{versions}\"", "path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the secret's latest", "and path in the key-value store. This restores the data,", "path: str | unicode :param versions: The versions to undelete.", "secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data in the KV store", "= cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params,", "version: Specifies the version to return. If not set the", "cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post( url=api_path,", "if cas_required is not None: if not isinstance(cas_required, bool): error_msg", "(empty body) :param max_versions: The number of versions to keep", "utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT):", "path: Path :type path: str | unicode :param secret: The", "file will not return a value. Note that no policy-based", "use a Check-And-Set operation. If not set the write will", "key-value store. Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body)", "be allowed if the key doesn't exist. If the index", "store. Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body) :param", "specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param", ":param path: Specifies the path of the secret to undelete.", "204 (empty body) :param path: Path :type path: str |", "will require the cas parameter to be set on all", "normal get requests. 
:type versions: list of int :param mount_point:", "calling token must have an ACL policy granting the create", "containing one or more integers, \"{versions}\" provided.'.format( versions=versions ) raise", "all write requests. :type cas_required: bool :param mount_point: The \"path\"", "Produces: 204 (empty body) :param max_versions: The number of versions", "application/json :param mount_point: The \"path\" the secret engine was mounted", "return a value. Note that no policy-based filtering is performed", "the write will be allowed. If set to 0 a", "raise exceptions.InvalidPath('No value found at \"{path}\"; patch only works on", ") def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified", "params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point,", "soft delete of the secret's latest version at the specified", "version will be permanently deleted. :type max_versions: int :param cas_required:", "location. This marks the version as deleted and will stop", "cas parameter to be set on all write requests. :type", "version data for the specified key. All version history will", "from hvac import exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT", "provided.'.format( versions=versions ) raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions,", "} api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params,", "cas_required: If true all keys will require the cas parameter", "will only be allowed if the key's current version matches", "POST: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Path :type path:", "\"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, )", "URL. 
:type path: str | unicode :param mount_point: The \"path\"", "KV Version 2 configuration. Supported methods: GET: /auth/{mount_point}/config. Produces: 200", "do a read. try: current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, )", "deleted. Defaults to 10. :type max_versions: int :param cas_required: If", "requests.Response \"\"\" params = {} if max_versions is not None:", "a soft delete of the specified versions of the secret.", "them from being returned from reads, but the underlying data", "at the specified location. Folders are suffixed with /. The", "the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json", "response of the request. :rtype: dict \"\"\" params = {}", "a secret at the specified location. If the value does", "exist. If the index is non-zero the write will only", "= 'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets Engine - Version 2", "list. This is specified as part of the URL. :type", "/auth/{mount_point}/config. Produces: 200 application/json :param mount_point: The \"path\" the secret", ") return response.json() def create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create", "utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self,", "data will not be removed. A delete can be undone", "yet exist, the calling token must have an ACL policy", "backend's configured max version is used. Once a key has", "can be undone using the undelete path. Supported methods: POST:", "error_msg = 'argument to \"versions\" must be a list containing", ":rtype: dict \"\"\" # First, do a read. 
try: current_secret_version", "mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def list_secrets(self, path,", "| unicode :param secret: The contents of the \"secret\" dict", "= {} if max_versions is not None: params['max_versions'] = max_versions", "version will be permanently deleted. Defaults to 10. :type max_versions:", ":param path: Specifies the path of the secret to delete.", "response of the request. :rtype: requests.Response \"\"\" params = {}", "overwrite this value. Once a key has more than the", "ACL policy granting the update capability. Supported methods: POST: /{mount_point}/data/{path}.", "unicode :param mount_point: The \"path\" the secret engine was mounted", "| unicode :return: The JSON response of the request. :rtype:", "for the provided version and path in the key-value store.", "= { 'options': {}, 'data': secret, } if cas is", ":rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.delete(", "of the request. :rtype: dict \"\"\" params = { 'options':", "DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): \"\"\"KV Secrets Engine - Version", "of the secret's latest version at the specified location. This", "the request. :rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path)", "application/json :param path: Specifies the path of the secrets to", "GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the path", "= { 'max_versions': max_versions, } if cas_required is not None:", "the configured allowed versions the oldest version will be permanently", "= utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path, ) def delete_secret_versions(self,", "Their data will be permanently deleted. 
:type versions: list of", "of the \"secret\" dict will be stored and returned on", "\"secret\" dict will be stored and returned on read. :type", "utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self,", "Supported methods: POST: /{mount_point}/delete/{path}. Produces: 204 (empty body) :param path:", "normal get requests. :type versions: int :param mount_point: The \"path\"", "dict will be stored and returned on read. :type secret:", "= utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params, ) def read_configuration(self,", "Supported methods: POST: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Path", "get requests. :type versions: int :param mount_point: The \"path\" the", "0 a write will only be allowed if the key", "of the request. :rtype: requests.Response \"\"\" params = {} if", "be restored and their data will be returned on normal", "in the key-value store. Supported methods: POST: /{mount_point}/config. Produces: 204", "was mounted on. :type mount_point: str | unicode :return: The", "POST: /{mount_point}/config. Produces: 204 (empty body) :param max_versions: The number", "secret engine was mounted on. :type mount_point: str | unicode", "the specified location. Folders are suffixed with /. The input", "to read. This is specified as part of the URL.", "path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified version data and", "| unicode :param mount_point: The \"path\" the secret engine was", "response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data", "request. :rtype: dict \"\"\" params = {} if version is", "set on all write requests. If false, the backend's configuration", "exists, the calling token must have an ACL policy granting", "permanently deleted. 
Defaults to 10. :type max_versions: int :param cas_required:", "destroy. This is specified as part of the URL. :type", "json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of", "If not set the latest version is returned. :type version:", "to \"versions\" must be a list containing one or more", "the key's current version matches the version specified in the", "Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10, cas_required=None,", "path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the specified", "version and path in the key-value store. This restores the", "cas_required is not None: if not isinstance(cas_required, bool): error_msg =", "= 'argument to \"versions\" must be a list containing one", "for the specified key. All version history will be removed.", "data will not be deleted, but it will no longer", "dict \"\"\" # First, do a read. try: current_secret_version =", "allowed versions the oldest version will be permanently deleted. Defaults", "dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated secret.", "| unicode :param versions: The versions to undelete. The versions", "mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete", "versions will be restored and their data will be returned", "back updated secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, )", "to return. If not set the latest version is returned.", "Specifies the path of the secret to read. This is", "cas parameter. 
:type cas: int :param secret: The contents of", "secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version of a secret", "versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the specified versions", "= cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path,", "params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point,", "the provided version and path in the key-value store. This", "the data for the provided version and path in the", "removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param", "from being returned from reads, but the underlying data will", "{ 'max_versions': max_versions, } if cas_required is not None: params['cas_required']", "be permanently deleted. Defaults to 10. :type max_versions: int :param", "cas_required: If true the key will require the cas parameter", "the key-value store. This restores the data, allowing it to", "value does not yet exist, the calling token must have", "cas parameter to be set on all write requests. If", "\"path\" the secret engine was mounted on. :type mount_point: str", "max_versions of cas_required setting on an existing path. Supported methods:", "of the secret to delete. This is specified as part", "encode sensitive information in key names. The values themselves are", "to 10. :type max_versions: int :param cas_required: If true all", "unicode :param versions: The versions to be deleted. The versioned", "versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the specified version data and numbers", "true the key will require the cas parameter to be", "Supported methods: POST: /{mount_point}/metadata/{path}. 
Produces: 204 (empty body) :param path:", "\"\"\"Create a new version of a secret at the specified", "existing data.'.format(path=path)) # Update existing secret dict. patched_secret = current_secret_version['data']['data']", "update capability. Supported methods: POST: /{mount_point}/data/{path}. Produces: 200 application/json :param", "} api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params,", "{ 'options': {}, 'data': secret, } if cas is not", "mounted on. :type mount_point: str | unicode :return: The JSON", "the specified versions of the secret. This marks the versions", "the update capability. Supported methods: POST: /{mount_point}/data/{path}. Produces: 200 application/json", "of versions to keep per key. This value applies to", "unicode :return: The JSON response of the request. :rtype: dict", "Write back updated secret. return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point,", "def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data in", "cas_required api_path = utils.format_url('/v1/{mount_point}/config', mount_point=mount_point) return self._adapter.post( url=api_path, json=params, )", "\"\"\" params = { 'max_versions': max_versions, } if cas_required is", "all keys will require the cas parameter to be set", "response of the request. :rtype: dict \"\"\" params = {", ":type max_versions: int :param cas_required: If true the key will", "\"\"\" # First, do a read. try: current_secret_version = self.read_secret_version(", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the specified versions of", "will be permanently deleted. 
:type max_versions: int :param cas_required: If", "index is non-zero the write will only be allowed if", "raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path =", "store. Supported methods: POST: /{mount_point}/config. Produces: 204 (empty body) :param", "unicode :param cas: Set the \"cas\" value to use a", "set the latest version is returned. :type version: int :param", "specified versions of the secret. This marks the versions as", "'max_versions': max_versions, } if cas_required is not None: params['cas_required'] =", "url=api_path, ) def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft", "\"\"\"KV Secrets Engine - Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\"", "requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.delete( url=api_path,", "from the key-value store. Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204", "token must have an ACL policy granting the create capability.", "undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data for the provided", "specified location. If the value does not yet exist, the", "key in the key-value store. Supported methods: POST: /{mount_point}/config. Produces:", "params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post(", "None: if not isinstance(cas_required, bool): error_msg = 'bool expected for", "granting the create capability. If the value already exists, the", "requests. :type versions: list of int :param mount_point: The \"path\"", "key-value store. This restores the data, allowing it to be", "if the key doesn't exist. 
If the index is non-zero", "\"\"\"Issue a soft delete of the specified versions of the", "will be restored and their data will be returned on", "response of the request. :rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}',", "the secret engine was mounted on. :type mount_point: str |", "found at \"{path}\"; patch only works on existing data.'.format(path=path)) #", "this value. Once a key has more than the configured", "== 0: error_msg = 'argument to \"versions\" must be a", "{} if version is not None: params['version'] = version api_path", "self._adapter.post( url=api_path, json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a", "false, the backend's configuration will be used. :type cas_required: bool", "/{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the path of", "not None: params['max_versions'] = max_versions if cas_required is not None:", "as deleted and will stop them from being returned from", "to all keys, but a key's metadata setting can overwrite", "2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT):", "key. This value applies to all keys, but a key's", "as deleted and will stop it from being returned from", "applies to all keys, but a key's metadata setting can", "'options': {}, 'data': secret, } if cas is not None:", "in normal get requests. :type versions: int :param mount_point: The", "exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}',", "POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body) :param path: Specifies the", "requests.Response \"\"\" if not isinstance(versions, list) or len(versions) == 0:", "isinstance(cas_required, bool): error_msg = 'bool expected for cas_required param, {type}", "request. 
:rtype: dict \"\"\" params = { 'options': {}, 'data':", "def configure(self, max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings that", "key doesn't exist. If the index is non-zero the write", "key names at the specified location. Folders are suffixed with", "request. :rtype: requests.Response \"\"\" params = {} if max_versions is", "utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase):", "\"\"\" params = {} if version is not None: params['version']", "# -*- coding: utf-8 -*- \"\"\"KvV2 methods module.\"\"\" from hvac", "The response of the request. :rtype: requests.Response \"\"\" api_path =", "path: str | unicode :param cas: Set the \"cas\" value", "str | unicode :return: The response of the request. :rtype:", "a value. Note that no policy-based filtering is performed on", "undone using the undelete path. Supported methods: DELETE: /{mount_point}/data/{path}. Produces:", "response = self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT):", "utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path) return response.json() def", "params = { 'max_versions': max_versions, } if cas_required is not", "on keys; do not encode sensitive information in key names.", "url=api_path, ) return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the", "input must be a folder; list on a file will", "dict :param mount_point: The \"path\" the secret engine was mounted", "mount_point: str | unicode :return: The response of the request.", "patch only works on existing data.'.format(path=path)) # Update existing secret", "request. 
:rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return", "oldest version will be permanently deleted. :type max_versions: int :param", "\"\"\"Retrieve the secret at the specified location. Supported methods: GET:", "\"\"\"Issue a soft delete of the secret's latest version at", "str | unicode :param versions: The versions to be deleted.", "200 application/json :param path: Specifies the path of the secret", "the key metadata and all version data for the specified", "versions: The versions to undelete. The versions will be restored", "but a key's metadata setting can overwrite this value. Once", "response of the request. :rtype: requests.Response \"\"\" params = {", ":param cas_required: If true all keys will require the cas", "not isinstance(cas_required, bool): error_msg = 'bool expected for cas_required param,", "str | unicode :param version: Specifies the version to return.", "path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of cas_required setting", ") def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version 2 configuration.", ") def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of", "of cas_required setting on an existing path. Supported methods: POST:", "write will only be allowed if the key doesn't exist.", "path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at", "it will no longer be returned in normal get requests.", "This marks the versions as deleted and will stop them", "will be allowed. If set to 0 a write will", "to undelete. The versions will be restored and their data", "params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point,", "allowed if the key doesn't exist. 
If the index is", "except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found at \"{path}\"; patch only", "not None: if not isinstance(cas_required, bool): error_msg = 'bool expected", "list on a file will not return a value. Note", ":type path: str | unicode :param versions: The versions to", "of the URL. :type path: str | unicode :param mount_point:", "the URL. :type path: str | unicode :param mount_point: The", "secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write back updated", "must have an ACL policy granting the update capability. Supported", "url=api_path, json=params, ) def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the", "and will stop them from being returned from reads, but", "path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param", "The number of versions to keep per key. This value", "Update existing secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write", "Path :type path: str | unicode :param max_versions: The number", "= utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def", "location. Folders are suffixed with /. The input must be", "/{mount_point}/undelete/{path}. Produces: 204 (empty body) :param path: Specifies the path", "params['max_versions'] = max_versions if cas_required is not None: if not", "return self._adapter.post( url=api_path, json=params, ) def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return", "read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version 2 configuration. Supported methods:", ") def list_secrets(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Return a list of key", "mounted on. 
:type mount_point: str | unicode :return: The response", "The versions to be deleted. The versioned data will not", "the backend's configured max version is used. Once a key", "at the specified path. Supported methods: GET: /{mount_point}/metadata/{path}. Produces: 200", "secret at the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces:", "If the value does not yet exist, the calling token", "Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path:", "{type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}',", "cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at the specified location. Supported methods:", "The versions will be restored and their data will be", "import exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret'", "returned. :type version: int :param mount_point: The \"path\" the secret", "10. :type max_versions: int :param cas_required: If true all keys", "of the URL. :type path: str | unicode :param versions:", "with /. The input must be a folder; list on", "KvV2(VaultApiBase): \"\"\"KV Secrets Engine - Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html", "of a secret at the specified location. If the value", "path: str | unicode :param mount_point: The \"path\" the secret", "parameter to be set on all write requests. If false,", "versions the oldest version will be permanently deleted. Defaults to", "versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path,", "destroy. 
Their data will be permanently deleted. :type versions: list", "do not encode sensitive information in key names. The values", "provided path from the key-value store. Supported methods: POST: /{mount_point}/destroy/{path}.", "exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class", "#!/usr/bin/env python # -*- coding: utf-8 -*- \"\"\"KvV2 methods module.\"\"\"", "to use a Check-And-Set operation. If not set the write", "Supported methods: POST: /{mount_point}/destroy/{path}. Produces: 204 (empty body) :param path:", "granting the update capability. Supported methods: POST: /{mount_point}/data/{path}. Produces: 200", "api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, )", "bool :param mount_point: The \"path\" the secret engine was mounted", ":type secret: dict :param mount_point: The \"path\" the secret engine", "utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, params=params, ) return", "params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response =", "The response of the request. :rtype: requests.Response \"\"\" if not", ":param path: Path :type path: str | unicode :param secret:", "api_path = utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, )", ":rtype: requests.Response \"\"\" if not isinstance(versions, list) or len(versions) ==", "the calling token must have an ACL policy granting the", "to be set on all write requests. 
:type cas_required: bool", "path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at the specified location.", "int :param cas_required: If true the key will require the", "a folder; list on a file will not return a", "The JSON response of the request. :rtype: dict \"\"\" api_path", ":param cas_required: If true the key will require the cas", "a write will only be allowed if the key doesn't", "the version specified in the cas parameter. :type cas: int", "max_versions: The number of versions to keep per key. If", "the key will require the cas parameter to be set", "new version of a secret at the specified location. If", "self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT):", "the max_versions of cas_required setting on an existing path. Supported", "marks the version as deleted and will stop it from", "deleted. The versioned data will not be deleted, but it", "Engine - Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self,", "Supported methods: GET: /{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies", "- Version 2 (API). Reference: https://www.vaultproject.io/api/secret/kv/kv-v2.html \"\"\" def configure(self, max_versions=10,", ":param versions: The versions to undelete. The versions will be", "delete can be undone using the undelete path. Supported methods:", "write requests. :type cas_required: bool :param mount_point: The \"path\" the", "is performed on keys; do not encode sensitive information in", "response = self._adapter.get( url=api_path, ) return response.json() def update_metadata(self, path,", "permanently deleted. :type max_versions: int :param cas_required: If true the", "the URL. :type path: str | unicode :param version: Specifies", "be set on all write requests. 
:type cas_required: bool :param", "in key names. The values themselves are not accessible via", "or update data in the KV store without overwriting. :param", "The JSON response of the create_or_update_secret request. :rtype: dict \"\"\"", "of key names at the specified location. Folders are suffixed", "This is specified as part of the URL. :type path:", "cas_required is not None: params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/config',", "not set, the backend's configured max version is used. Once", "the create_or_update_secret request. :rtype: dict \"\"\" # First, do a", "versions for the secret at the specified path. Supported methods:", "hvac import exceptions, utils from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT =", "# First, do a read. try: current_secret_version = self.read_secret_version( path=path,", "json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete (permanently) the key", "the value already exists, the calling token must have an", "from hvac.api.vault_api_base import VaultApiBase DEFAULT_MOUNT_POINT = 'secret' class KvV2(VaultApiBase): \"\"\"KV", "the request. :rtype: requests.Response \"\"\" params = {} if max_versions", "be permanently deleted. :type versions: list of int :param mount_point:", "A delete can be undone using the undelete path. Supported", "= self._adapter.post( url=api_path, json=params, ) return response.json() def patch(self, path,", "specified in the cas parameter. :type cas: int :param secret:", "path of the secret to delete. This is specified as", "data.'.format(path=path)) # Update existing secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret)", "will be permanently deleted. Defaults to 10. :type max_versions: int", "read. This is specified as part of the URL. 
:type", "api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, params=params,", "on a file will not return a value. Note that", "a list containing one or more integers, \"{versions}\" provided.'.format( versions=versions", "of the URL. :type path: str | unicode :param version:", "secret: dict :param mount_point: The \"path\" the secret engine was", "create capability. If the value already exists, the calling token", "methods: POST: /{mount_point}/config. Produces: 204 (empty body) :param max_versions: The", "list containing one or more integers, \"{versions}\" provided.'.format( versions=versions )", "must be a folder; list on a file will not", "deleted and will stop it from being returned from reads,", "api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, )", "| unicode :return: The JSON response of the create_or_update_secret request.", "version: int :param mount_point: The \"path\" the secret engine was", "The versions to undelete. The versions will be restored and", "not accessible via this command. Supported methods: LIST: /{mount_point}/metadata/{path}. Produces:", "\"cas\" value to use a Check-And-Set operation. If not set", "exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}',", "methods: LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies the", "existing path. Supported methods: POST: /{mount_point}/metadata/{path}. Produces: 204 (empty body)", "is used. Once a key has more than the configured", "of the secrets to list. This is specified as part", "suffixed with /. The input must be a folder; list", ":return: The response of the request. 
:rtype: requests.Response \"\"\" if", "path: str | unicode :param max_versions: The number of versions", "setting can overwrite this value. Once a key has more", "Produces: 200 application/json :param path: Specifies the path of the", "Supported methods: POST: /{mount_point}/config. Produces: 204 (empty body) :param max_versions:", "update data in the KV store without overwriting. :param path:", "Check-And-Set operation. If not set the write will be allowed.", "is specified as part of the URL. :type path: str", "their data will be returned on normal get requests. :type", "the backend's configuration will be used. :type cas_required: bool :param", "request. :rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) return", "versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path,", "expected for cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] =", "\"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.list( url=api_path,", "utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def list_secrets(self,", "key's metadata setting can overwrite this value. Once a key", "stored and returned on read. :type secret: dict :param mount_point:", "'/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path) return response.json() def read_secret_version(self,", "names. The values themselves are not accessible via this command.", "for cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required", "the secret to undelete. 
This is specified as part of", "write requests. If false, the backend's configuration will be used.", "delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the", "/{mount_point}/metadata/{path}. Produces: 204 (empty body) :param path: Specifies the path", "be allowed. If set to 0 a write will only", "secrets to list. This is specified as part of the", "utils.format_url('/v1/{mount_point}/delete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self,", "will no longer be returned in normal get requests. :type", "/{mount_point}/data/{path}. Produces: 200 application/json :param path: Path :type path: str", "at the specified location. If the value does not yet", "path=path) response = self._adapter.get( url=api_path, ) return response.json() def update_metadata(self,", "information in key names. The values themselves are not accessible", "engine was mounted on. :type mount_point: str | unicode :return:", "return self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue", "specified as part of the URL. :type path: str |", "the request. :rtype: dict \"\"\" params = { 'options': {},", "parameter. :type cas: int :param secret: The contents of the", "dict \"\"\" params = {} if version is not None:", "the secrets to list. This is specified as part of", "of the request. 
:rtype: requests.Response \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point,", "mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self, path,", "json=params, ) def undelete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Undelete the data", "requests.Response \"\"\" params = { 'max_versions': max_versions, } if cas_required", "a new version of a secret at the specified location.", "the secret. This marks the versions as deleted and will", "self._adapter.post( url=api_path, json=params, ) def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently", "/{mount_point}/data/{path}. Produces: 200 application/json :param path: Specifies the path of", "mount_point: The \"path\" the secret engine was mounted on. :type", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Set or update data in the KV store without", "returned from reads, but the underlying data will not be", "KV store without overwriting. :param path: Path :type path: str", "specified version data and numbers for the provided path from", "be allowed if the key's current version matches the version", "key's current version matches the version specified in the cas", "removed. A delete can be undone using the undelete path.", ":type max_versions: int :param cas_required: If true all keys will", "path: Path :type path: str | unicode :param max_versions: The", "The number of versions to keep per key. If not", "non-zero the write will only be allowed if the key's", "cas is not None: params['options']['cas'] = cas api_path = utils.format_url('/v1/{mount_point}/data/{path}',", "mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, params=params, ) return response.json()", "in the cas parameter. 
:type cas: int :param secret: The", "mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, ) return response.json() def", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of the secret's latest version", "path=path) return self._adapter.delete( url=api_path, ) def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT):", "Folders are suffixed with /. The input must be a", "If not set the write will be allowed. If set", "max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of cas_required setting on", "} if cas_required is not None: params['cas_required'] = cas_required api_path", ") raise exceptions.ParamValidationError(error_msg) params = { 'versions': versions, } api_path", "on. :type mount_point: str | unicode :return: The response of", "on existing data.'.format(path=path)) # Update existing secret dict. patched_secret =", "keys; do not encode sensitive information in key names. The", ":rtype: dict \"\"\" params = { 'options': {}, 'data': secret,", ":type cas_required: bool :param mount_point: The \"path\" the secret engine", "response.json() def update_metadata(self, path, max_versions=None, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions", "Produces: 200 application/json :param path: Path :type path: str |", "response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the metadata and versions", "of the specified versions of the secret. This marks the", "policy granting the update capability. Supported methods: POST: /{mount_point}/data/{path}. Produces:", "the KV store without overwriting. :param path: Path :type path:", "for the provided path from the key-value store. Supported methods:", "undelete. This is specified as part of the URL. :type", "on. :type mount_point: str | unicode :return: The JSON response", "the \"cas\" value to use a Check-And-Set operation. 
If not", "received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg) params['cas_required'] = cas_required api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point,", "the data, allowing it to be returned on get requests.", "accessible via this command. Supported methods: LIST: /{mount_point}/metadata/{path}. Produces: 200", "\"\"\" api_path = utils.format_url( '/v1/{mount_point}/config', mount_point=mount_point, ) response = self._adapter.get(url=api_path)", "class KvV2(VaultApiBase): \"\"\"KV Secrets Engine - Version 2 (API). Reference:", "deleted. :type max_versions: int :param cas_required: If true the key", "url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV Version", "version history will be removed. Supported methods: DELETE: /{mount_point}/metadata/{path}. Produces:", "have an ACL policy granting the create capability. If the", "the version as deleted and will stop it from being", "path=path) response = self._adapter.get( url=api_path, params=params, ) return response.json() def", "if max_versions is not None: params['max_versions'] = max_versions if cas_required", "= self._adapter.list( url=api_path, ) return response.json() def read_secret_metadata(self, path, mount_point=DEFAULT_MOUNT_POINT):", "current_secret_version = self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No", "keys will require the cas parameter to be set on", "| unicode :param versions: The versions to destroy. 
Their data", ":rtype: dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response =", "is not None: params['max_versions'] = max_versions if cas_required is not", "path=path) return self._adapter.post( url=api_path, json=params, ) def undelete_secret_versions(self, path, versions,", "version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path,", "on get requests. Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204 (empty", "the metadata and versions for the secret at the specified", "or len(versions) == 0: error_msg = 'argument to \"versions\" must", "= utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, ) def", "the secret to destroy. This is specified as part of", "if not isinstance(cas_required, bool): error_msg = 'bool expected for cas_required", "str | unicode :return: The JSON response of the create_or_update_secret", "existing secret dict. patched_secret = current_secret_version['data']['data'] patched_secret.update(secret) # Write back", "mount_point=DEFAULT_MOUNT_POINT): \"\"\"Updates the max_versions of cas_required setting on an existing", "store. This restores the data, allowing it to be returned", "only be allowed if the key doesn't exist. If the", "\"\"\"Updates the max_versions of cas_required setting on an existing path.", "Supported methods: LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json :param path: Specifies", "overwriting. :param path: Path :type path: str | unicode :param", "that are applied to every key in the key-value store.", "on normal get requests. :type versions: list of int :param", "will not be removed. 
A delete can be undone using", "self._adapter.post( url=api_path, json=params, ) def read_configuration(self, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Read the KV", "str | unicode :param versions: The versions to destroy. Their", "if not isinstance(versions, list) or len(versions) == 0: error_msg =", "unicode :param version: Specifies the version to return. If not", "versions: list of int :param mount_point: The \"path\" the secret", "the request. :rtype: dict \"\"\" params = {} if version", "def delete_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft delete of", "a file will not return a value. Note that no", "at the specified location. Supported methods: GET: /{mount_point}/data/{path}. Produces: 200", "via this command. Supported methods: LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json", "is not None: params['version'] = version api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point,", "204 (empty body) :param path: Specifies the path of the", "} api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params,", "data will be permanently deleted. :type versions: list of int", "200 application/json :param mount_point: The \"path\" the secret engine was", "versioned data will not be deleted, but it will no", "read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve the secret at the specified", "max_versions=10, cas_required=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Configure backend level settings that are applied", "api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post( url=api_path, json=params,", "path=path) response = self._adapter.list( url=api_path, ) return response.json() def read_secret_metadata(self,", "LIST: /{mount_point}/metadata/{path}. 
Produces: 200 application/json :param path: Specifies the path", "version data and numbers for the provided path from the", "secret. This marks the versions as deleted and will stop", "Produces: 204 (empty body) :param path: Path :type path: str", "level settings that are applied to every key in the", "int :param cas_required: If true all keys will require the", "api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path) return self._adapter.post( url=api_path, json=params, )", "application/json :param path: Specifies the path of the secret to", ":type cas: int :param secret: The contents of the \"secret\"", "response of the request. :rtype: dict \"\"\" api_path = utils.format_url('/v1/{mount_point}/metadata/{path}',", "oldest version will be permanently deleted. Defaults to 10. :type", "it from being returned from reads, but the underlying data", "Defaults to 10. :type max_versions: int :param cas_required: If true", "only be allowed if the key's current version matches the", "if version is not None: params['version'] = version api_path =", "self.read_secret_version( path=path, mount_point=mount_point, ) except exceptions.InvalidPath: raise exceptions.InvalidPath('No value found", "will not return a value. Note that no policy-based filtering", "\"\"\" params = {} if max_versions is not None: params['max_versions']", ":param version: Specifies the version to return. If not set", "this command. Supported methods: LIST: /{mount_point}/metadata/{path}. Produces: 200 application/json :param", "versions to destroy. Their data will be permanently deleted. :type", "= utils.format_url('/v1/{mount_point}/metadata/{path}', mount_point=mount_point, path=path) response = self._adapter.get( url=api_path, ) return", "Produces: 200 application/json :param mount_point: The \"path\" the secret engine", "value to use a Check-And-Set operation. If not set the", "number of versions to keep per key. 
This value applies", "path=path, cas=current_secret_version['data']['metadata']['version'], secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue", "2 configuration. Supported methods: GET: /auth/{mount_point}/config. Produces: 200 application/json :param", "secret=patched_secret, mount_point=mount_point, ) def delete_latest_version_of_secret(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Issue a soft", ":param path: Path :type path: str | unicode :param cas:", "the \"secret\" dict will be stored and returned on read.", "numbers for the provided path from the key-value store. Supported", "capability. Supported methods: POST: /{mount_point}/data/{path}. Produces: 200 application/json :param path:", "= self._adapter.get(url=api_path) return response.json() def read_secret_version(self, path, version=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Retrieve", "returned on get requests. Supported methods: POST: /{mount_point}/undelete/{path}. Produces: 204", "versions to undelete. The versions will be restored and their", "and numbers for the provided path from the key-value store.", "and all version data for the specified key. All version", "= { 'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/destroy/{path}', mount_point=mount_point, path=path)", "at the specified location. This marks the version as deleted", "of the secret. This marks the versions as deleted and", "create_or_update_secret(self, path, secret, cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version of", "can be undone using the undelete path. Supported methods: DELETE:", "set the write will be allowed. If set to 0", "patched_secret.update(secret) # Write back updated secret. 
return self.create_or_update_secret( path=path, cas=current_secret_version['data']['metadata']['version'],", "cas=None, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Create a new version of a secret at", "an ACL policy granting the create capability. If the value", "number of versions to keep per key. If not set,", "The versioned data will not be deleted, but it will", "stop them from being returned from reads, but the underlying", "body) :param max_versions: The number of versions to keep per", ":rtype: dict \"\"\" params = {} if version is not", "policy-based filtering is performed on keys; do not encode sensitive", "using the undelete path. Supported methods: POST: /{mount_point}/delete/{path}. Produces: 204", ":param versions: The versions to destroy. Their data will be", "path of the secrets to list. This is specified as", "url=api_path, json=params, ) return response.json() def patch(self, path, secret, mount_point=DEFAULT_MOUNT_POINT):", "set to 0 a write will only be allowed if", "mount_point=mount_point, path=path) response = self._adapter.list( url=api_path, ) return response.json() def", "path in the key-value store. This restores the data, allowing", "= 'bool expected for cas_required param, {type} received'.format(type=type(cas_required)) raise exceptions.ParamValidationError(error_msg)", "'versions': versions, } api_path = utils.format_url('/v1/{mount_point}/undelete/{path}', mount_point=mount_point, path=path) return self._adapter.post(", "/{mount_point}/destroy/{path}. Produces: 204 (empty body) :param path: Specifies the path", "the versions as deleted and will stop them from being", "will only be allowed if the key doesn't exist. 
If", "json=params, ) def destroy_secret_versions(self, path, versions, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Permanently remove the", "response = self._adapter.get( url=api_path, params=params, ) return response.json() def create_or_update_secret(self,", "true all keys will require the cas parameter to be", "return self._adapter.post( url=api_path, json=params, ) def delete_metadata_and_all_versions(self, path, mount_point=DEFAULT_MOUNT_POINT): \"\"\"Delete", "bool): error_msg = 'bool expected for cas_required param, {type} received'.format(type=type(cas_required))", "= cas api_path = utils.format_url('/v1/{mount_point}/data/{path}', mount_point=mount_point, path=path) response = self._adapter.post(" ]
[ "vulkan examples to attached device, this may take some time!", "(Y/N)\").lower() == 'y' if answer: BUILD_ARGUMENTS = \"\" for arg", "this may take some time! (Y/N)\").lower() == 'y' if answer:", "all examples to connected device(s) import subprocess import sys answer", "if subprocess.call((\"python build-all.py -deploy %s\" % BUILD_ARGUMENTS).split(' ')) != 0:", "subprocess import sys answer = input(\"Install all vulkan examples to", "%s\" % BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error: Not all examples", "input(\"Install all vulkan examples to attached device, this may take", "= \"\" for arg in sys.argv[1:]: if arg == \"-validation\":", "\"-validation\" if subprocess.call((\"python build-all.py -deploy %s\" % BUILD_ARGUMENTS).split(' ')) !=", "= input(\"Install all vulkan examples to attached device, this may", "to connected device(s) import subprocess import sys answer = input(\"Install", "\"-validation\": BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python build-all.py -deploy %s\" %", "may take some time! (Y/N)\").lower() == 'y' if answer: BUILD_ARGUMENTS", "to attached device, this may take some time! 
(Y/N)\").lower() ==", "if answer: BUILD_ARGUMENTS = \"\" for arg in sys.argv[1:]: if", "build-all.py -deploy %s\" % BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error: Not", "BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python build-all.py -deploy %s\" % BUILD_ARGUMENTS).split('", "sys answer = input(\"Install all vulkan examples to attached device,", "answer = input(\"Install all vulkan examples to attached device, this", "+= \"-validation\" if subprocess.call((\"python build-all.py -deploy %s\" % BUILD_ARGUMENTS).split(' '))", "0: print(\"Error: Not all examples may have been installed!\") sys.exit(-1)", "device(s) import subprocess import sys answer = input(\"Install all vulkan", "sys.argv[1:]: if arg == \"-validation\": BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python", "arg in sys.argv[1:]: if arg == \"-validation\": BUILD_ARGUMENTS += \"-validation\"", "if arg == \"-validation\": BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python build-all.py", "Install all examples to connected device(s) import subprocess import sys", "connected device(s) import subprocess import sys answer = input(\"Install all", "device, this may take some time! (Y/N)\").lower() == 'y' if", "subprocess.call((\"python build-all.py -deploy %s\" % BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error:", "attached device, this may take some time! (Y/N)\").lower() == 'y'", "# Install all examples to connected device(s) import subprocess import", "arg == \"-validation\": BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python build-all.py -deploy", "take some time! 
(Y/N)\").lower() == 'y' if answer: BUILD_ARGUMENTS =", "in sys.argv[1:]: if arg == \"-validation\": BUILD_ARGUMENTS += \"-validation\" if", "== 'y' if answer: BUILD_ARGUMENTS = \"\" for arg in", "-deploy %s\" % BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error: Not all", "import sys answer = input(\"Install all vulkan examples to attached", "for arg in sys.argv[1:]: if arg == \"-validation\": BUILD_ARGUMENTS +=", "% BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error: Not all examples may", "== \"-validation\": BUILD_ARGUMENTS += \"-validation\" if subprocess.call((\"python build-all.py -deploy %s\"", "import subprocess import sys answer = input(\"Install all vulkan examples", "\"\" for arg in sys.argv[1:]: if arg == \"-validation\": BUILD_ARGUMENTS", "answer: BUILD_ARGUMENTS = \"\" for arg in sys.argv[1:]: if arg", "time! (Y/N)\").lower() == 'y' if answer: BUILD_ARGUMENTS = \"\" for", "all vulkan examples to attached device, this may take some", "')) != 0: print(\"Error: Not all examples may have been", "'y' if answer: BUILD_ARGUMENTS = \"\" for arg in sys.argv[1:]:", "examples to connected device(s) import subprocess import sys answer =", "BUILD_ARGUMENTS = \"\" for arg in sys.argv[1:]: if arg ==", "!= 0: print(\"Error: Not all examples may have been installed!\")", "BUILD_ARGUMENTS).split(' ')) != 0: print(\"Error: Not all examples may have", "examples to attached device, this may take some time! (Y/N)\").lower()", "some time! (Y/N)\").lower() == 'y' if answer: BUILD_ARGUMENTS = \"\"" ]
[ "get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config", "ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016,", "adapter test # 2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017", "ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017,", "import json def main(): # Create Renderer config = RendererConfig()", "json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and adapter test # 2016", "from generators.ahoughton import AhoughtonGenerator from render_config import RendererConfig from problem_renderer", "# 2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem =", "and adapter test # 2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) #", "crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and adapter test # 2016 problem", "renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer = ProblemRenderer(", "DefaultProblemAdapter from adapters.crg import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import", "DefaultProblemAdapter(), config ) crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config )", "render_config import RendererConfig from problem_renderer import ProblemRenderer from moonboard import", "2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem) if __name__ == \"__main__\": main()", "from adapters.ahoughton import AhoughtonAdapter import json def main(): # Create", "renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 
'r') as f: crg_problems = json.load(f)", "json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 'r') as f: crg_problems =", "CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import json def main(): #", "def main(): # Create Renderer config = RendererConfig() renderer =", "get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config", "import AhoughtonGenerator from render_config import RendererConfig from problem_renderer import ProblemRenderer", "= ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe')", "problem_renderer import ProblemRenderer from moonboard import get_moonboard from adapters.default import", "import get_moonboard from adapters.default import DefaultProblemAdapter from adapters.crg import CRGProblemAdapter", "problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem)", "ahoughton_renderer_2016.render_problem(problem) # 2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem) if __name__ ==", "with open('data/problems.json', 'r') as f: problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True)", "with_info=True) with open('data/crg.json', 'r') as f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1'])", "import AhoughtonAdapter import json def main(): # Create Renderer config", "= ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem) if", "ProblemRenderer from moonboard import get_moonboard from adapters.default import DefaultProblemAdapter 
from", "ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem) if __name__", "from adapters.default import DefaultProblemAdapter from adapters.crg import CRGProblemAdapter from adapters.ahoughton", "AhoughtonAdapter(), config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer(", "= RendererConfig() renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer", "problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 'r') as f:", "import RendererConfig from problem_renderer import ProblemRenderer from moonboard import get_moonboard", "CRGProblemAdapter(), config ) ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config )", "as f: problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 'r')", "f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and adapter", "# Create Renderer config = RendererConfig() renderer = ProblemRenderer( get_moonboard(2017),", "ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with open('data/problems.json', 'r')", "'r') as f: problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json',", "import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import json def main():", "from render_config import RendererConfig from problem_renderer import ProblemRenderer from moonboard", "ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(),", "Load data with 
open('data/problems.json', 'r') as f: problems = json.load(f)", "AhoughtonGenerator from render_config import RendererConfig from problem_renderer import ProblemRenderer from", "# Load data with open('data/problems.json', 'r') as f: problems =", "= ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016),", "get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 =", "= ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer = ProblemRenderer( get_moonboard(2017),", "= json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and adapter test #", "= json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 'r') as f: crg_problems", "main(): # Create Renderer config = RendererConfig() renderer = ProblemRenderer(", "config ) crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016", "config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with", "from adapters.crg import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import json", "with open('data/crg.json', 'r') as f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) #", "= AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config )", "RendererConfig() renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer =", "get_moonboard from adapters.default import DefaultProblemAdapter from adapters.crg import CRGProblemAdapter from", "ahoughton_generator_2016 = AhoughtonGenerator(year=2016, 
driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config", "get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load", "adapters.crg import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter import json def", "f: problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True) with open('data/crg.json', 'r') as", "crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016 = ProblemRenderer(", "adapters.default import DefaultProblemAdapter from adapters.crg import CRGProblemAdapter from adapters.ahoughton import", "= AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with open('data/problems.json', 'r') as", "config ) ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016", "ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') #", "Create Renderer config = RendererConfig() renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(),", "AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017", "from moonboard import get_moonboard from adapters.default import DefaultProblemAdapter from adapters.crg", "= ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe')", ") ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with 
open('data/problems.json',", "RendererConfig from problem_renderer import ProblemRenderer from moonboard import get_moonboard from", "ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017", "import ProblemRenderer from moonboard import get_moonboard from adapters.default import DefaultProblemAdapter", "generators.ahoughton import AhoughtonGenerator from render_config import RendererConfig from problem_renderer import", "AhoughtonAdapter import json def main(): # Create Renderer config =", "AhoughtonAdapter(), config ) ahoughton_generator_2017 = AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data", "generator and adapter test # 2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem)", "moonboard import get_moonboard from adapters.default import DefaultProblemAdapter from adapters.crg import", ") ahoughton_renderer_2016 = ProblemRenderer( get_moonboard(2016), AhoughtonAdapter(), config ) ahoughton_generator_2016 =", ") ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(),", "open('data/crg.json', 'r') as f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton", "'r') as f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator", "2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem = ahoughton_generator_2017.generate()", "Renderer config = RendererConfig() renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config", "test # 2016 problem = ahoughton_generator_2016.generate() ahoughton_renderer_2016.render_problem(problem) # 2017 problem", 
"open('data/problems.json', 'r') as f: problems = json.load(f) renderer.render_problem(problems['339318'], with_info=True) with", "as f: crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and", "# Ahoughton generator and adapter test # 2016 problem =", "ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config ) crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(),", "from problem_renderer import ProblemRenderer from moonboard import get_moonboard from adapters.default", "crg_problems = json.load(f) crg_renderer.render_problem(crg_problems['1']) # Ahoughton generator and adapter test", ") crg_renderer = ProblemRenderer( get_moonboard(2017), CRGProblemAdapter(), config ) ahoughton_renderer_2016 =", "Ahoughton generator and adapter test # 2016 problem = ahoughton_generator_2016.generate()", "data with open('data/problems.json', 'r') as f: problems = json.load(f) renderer.render_problem(problems['339318'],", "config = RendererConfig() renderer = ProblemRenderer( get_moonboard(2017), DefaultProblemAdapter(), config )", "# 2017 problem = ahoughton_generator_2017.generate() ahoughton_renderer_2017.render_problem(problem) if __name__ == \"__main__\":", "AhoughtonGenerator(year=2017, driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with open('data/problems.json', 'r') as f:", "config ) ahoughton_generator_2016 = AhoughtonGenerator(year=2016, driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017),", "driver_path='C:/.selenium_drivers/chromedriver.exe') ahoughton_renderer_2017 = ProblemRenderer( get_moonboard(2017), AhoughtonAdapter(), config ) ahoughton_generator_2017 =", "driver_path='C:/.selenium_drivers/chromedriver.exe') # Load data with open('data/problems.json', 'r') as f: problems", "adapters.ahoughton import AhoughtonAdapter import json def main(): # Create Renderer", "import DefaultProblemAdapter from 
adapters.crg import CRGProblemAdapter from adapters.ahoughton import AhoughtonAdapter", "json def main(): # Create Renderer config = RendererConfig() renderer" ]